merge with stable
Matt Mackall
r16261:7b9bf724 merge default
dirstate.py
@@ -1,748 +1,744
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 import errno
7 import errno
8
8
9 from node import nullid
9 from node import nullid
10 from i18n import _
10 from i18n import _
11 import scmutil, util, ignore, osutil, parsers, encoding
11 import scmutil, util, ignore, osutil, parsers, encoding
12 import struct, os, stat, errno
12 import struct, os, stat, errno
13 import cStringIO
13 import cStringIO
14
14
15 _format = ">cllll"
15 _format = ">cllll"
16 propertycache = util.propertycache
16 propertycache = util.propertycache
17 filecache = scmutil.filecache
17 filecache = scmutil.filecache
18
18
19 class repocache(filecache):
19 class repocache(filecache):
20 """filecache for files in .hg/"""
20 """filecache for files in .hg/"""
21 def join(self, obj, fname):
21 def join(self, obj, fname):
22 return obj._opener.join(fname)
22 return obj._opener.join(fname)
23
23
24 class rootcache(filecache):
24 class rootcache(filecache):
25 """filecache for files in the repository root"""
25 """filecache for files in the repository root"""
26 def join(self, obj, fname):
26 def join(self, obj, fname):
27 return obj._join(fname)
27 return obj._join(fname)
28
28
29 def _finddirs(path):
29 def _finddirs(path):
30 pos = path.rfind('/')
30 pos = path.rfind('/')
31 while pos != -1:
31 while pos != -1:
32 yield path[:pos]
32 yield path[:pos]
33 pos = path.rfind('/', 0, pos)
33 pos = path.rfind('/', 0, pos)
34
34
35 def _incdirs(dirs, path):
35 def _incdirs(dirs, path):
36 for base in _finddirs(path):
36 for base in _finddirs(path):
37 if base in dirs:
37 if base in dirs:
38 dirs[base] += 1
38 dirs[base] += 1
39 return
39 return
40 dirs[base] = 1
40 dirs[base] = 1
41
41
42 def _decdirs(dirs, path):
42 def _decdirs(dirs, path):
43 for base in _finddirs(path):
43 for base in _finddirs(path):
44 if dirs[base] > 1:
44 if dirs[base] > 1:
45 dirs[base] -= 1
45 dirs[base] -= 1
46 return
46 return
47 del dirs[base]
47 del dirs[base]
48
48
49 class dirstate(object):
49 class dirstate(object):
50
50
51 def __init__(self, opener, ui, root, validate):
51 def __init__(self, opener, ui, root, validate):
52 '''Create a new dirstate object.
52 '''Create a new dirstate object.
53
53
54 opener is an open()-like callable that can be used to open the
54 opener is an open()-like callable that can be used to open the
55 dirstate file; root is the root of the directory tracked by
55 dirstate file; root is the root of the directory tracked by
56 the dirstate.
56 the dirstate.
57 '''
57 '''
58 self._opener = opener
58 self._opener = opener
59 self._validate = validate
59 self._validate = validate
60 self._root = root
60 self._root = root
61 self._rootdir = os.path.join(root, '')
61 self._rootdir = os.path.join(root, '')
62 self._dirty = False
62 self._dirty = False
63 self._dirtypl = False
63 self._dirtypl = False
64 self._lastnormaltime = 0
64 self._lastnormaltime = 0
65 self._ui = ui
65 self._ui = ui
66 self._filecache = {}
66 self._filecache = {}
67
67
68 @propertycache
68 @propertycache
69 def _map(self):
69 def _map(self):
70 '''Return the dirstate contents as a map from filename to
70 '''Return the dirstate contents as a map from filename to
71 (state, mode, size, time).'''
71 (state, mode, size, time).'''
72 self._read()
72 self._read()
73 return self._map
73 return self._map
74
74
75 @propertycache
75 @propertycache
76 def _copymap(self):
76 def _copymap(self):
77 self._read()
77 self._read()
78 return self._copymap
78 return self._copymap
79
79
80 @propertycache
80 @propertycache
81 def _normroot(self):
82 return util.normcase(self._root)
83
84 @propertycache
85 def _foldmap(self):
81 def _foldmap(self):
86 f = {}
82 f = {}
87 for name in self._map:
83 for name in self._map:
88 f[util.normcase(name)] = name
84 f[util.normcase(name)] = name
89 f['.'] = '.' # prevents useless util.fspath() invocation
85 f['.'] = '.' # prevents useless util.fspath() invocation
90 return f
86 return f
91
87
92 @repocache('branch')
88 @repocache('branch')
93 def _branch(self):
89 def _branch(self):
94 try:
90 try:
95 return self._opener.read("branch").strip() or "default"
91 return self._opener.read("branch").strip() or "default"
96 except IOError, inst:
92 except IOError, inst:
97 if inst.errno != errno.ENOENT:
93 if inst.errno != errno.ENOENT:
98 raise
94 raise
99 return "default"
95 return "default"
100
96
101 @propertycache
97 @propertycache
102 def _pl(self):
98 def _pl(self):
103 try:
99 try:
104 fp = self._opener("dirstate")
100 fp = self._opener("dirstate")
105 st = fp.read(40)
101 st = fp.read(40)
106 fp.close()
102 fp.close()
107 l = len(st)
103 l = len(st)
108 if l == 40:
104 if l == 40:
109 return st[:20], st[20:40]
105 return st[:20], st[20:40]
110 elif l > 0 and l < 40:
106 elif l > 0 and l < 40:
111 raise util.Abort(_('working directory state appears damaged!'))
107 raise util.Abort(_('working directory state appears damaged!'))
112 except IOError, err:
108 except IOError, err:
113 if err.errno != errno.ENOENT:
109 if err.errno != errno.ENOENT:
114 raise
110 raise
115 return [nullid, nullid]
111 return [nullid, nullid]
116
112
117 @propertycache
113 @propertycache
118 def _dirs(self):
114 def _dirs(self):
119 dirs = {}
115 dirs = {}
120 for f, s in self._map.iteritems():
116 for f, s in self._map.iteritems():
121 if s[0] != 'r':
117 if s[0] != 'r':
122 _incdirs(dirs, f)
118 _incdirs(dirs, f)
123 return dirs
119 return dirs
124
120
125 def dirs(self):
121 def dirs(self):
126 return self._dirs
122 return self._dirs
127
123
128 @rootcache('.hgignore')
124 @rootcache('.hgignore')
129 def _ignore(self):
125 def _ignore(self):
130 files = [self._join('.hgignore')]
126 files = [self._join('.hgignore')]
131 for name, path in self._ui.configitems("ui"):
127 for name, path in self._ui.configitems("ui"):
132 if name == 'ignore' or name.startswith('ignore.'):
128 if name == 'ignore' or name.startswith('ignore.'):
133 files.append(util.expandpath(path))
129 files.append(util.expandpath(path))
134 return ignore.ignore(self._root, files, self._ui.warn)
130 return ignore.ignore(self._root, files, self._ui.warn)
135
131
136 @propertycache
132 @propertycache
137 def _slash(self):
133 def _slash(self):
138 return self._ui.configbool('ui', 'slash') and os.sep != '/'
134 return self._ui.configbool('ui', 'slash') and os.sep != '/'
139
135
140 @propertycache
136 @propertycache
141 def _checklink(self):
137 def _checklink(self):
142 return util.checklink(self._root)
138 return util.checklink(self._root)
143
139
144 @propertycache
140 @propertycache
145 def _checkexec(self):
141 def _checkexec(self):
146 return util.checkexec(self._root)
142 return util.checkexec(self._root)
147
143
148 @propertycache
144 @propertycache
149 def _checkcase(self):
145 def _checkcase(self):
150 return not util.checkcase(self._join('.hg'))
146 return not util.checkcase(self._join('.hg'))
151
147
152 def _join(self, f):
148 def _join(self, f):
153 # much faster than os.path.join()
149 # much faster than os.path.join()
154 # it's safe because f is always a relative path
150 # it's safe because f is always a relative path
155 return self._rootdir + f
151 return self._rootdir + f
156
152
157 def flagfunc(self, buildfallback):
153 def flagfunc(self, buildfallback):
158 if self._checklink and self._checkexec:
154 if self._checklink and self._checkexec:
159 def f(x):
155 def f(x):
160 p = self._join(x)
156 p = self._join(x)
161 if os.path.islink(p):
157 if os.path.islink(p):
162 return 'l'
158 return 'l'
163 if util.isexec(p):
159 if util.isexec(p):
164 return 'x'
160 return 'x'
165 return ''
161 return ''
166 return f
162 return f
167
163
168 fallback = buildfallback()
164 fallback = buildfallback()
169 if self._checklink:
165 if self._checklink:
170 def f(x):
166 def f(x):
171 if os.path.islink(self._join(x)):
167 if os.path.islink(self._join(x)):
172 return 'l'
168 return 'l'
173 if 'x' in fallback(x):
169 if 'x' in fallback(x):
174 return 'x'
170 return 'x'
175 return ''
171 return ''
176 return f
172 return f
177 if self._checkexec:
173 if self._checkexec:
178 def f(x):
174 def f(x):
179 if 'l' in fallback(x):
175 if 'l' in fallback(x):
180 return 'l'
176 return 'l'
181 if util.isexec(self._join(x)):
177 if util.isexec(self._join(x)):
182 return 'x'
178 return 'x'
183 return ''
179 return ''
184 return f
180 return f
185 else:
181 else:
186 return fallback
182 return fallback
187
183
188 def getcwd(self):
184 def getcwd(self):
189 cwd = os.getcwd()
185 cwd = os.getcwd()
190 if cwd == self._root:
186 if cwd == self._root:
191 return ''
187 return ''
192 # self._root ends with a path separator if self._root is '/' or 'C:\'
188 # self._root ends with a path separator if self._root is '/' or 'C:\'
193 rootsep = self._root
189 rootsep = self._root
194 if not util.endswithsep(rootsep):
190 if not util.endswithsep(rootsep):
195 rootsep += os.sep
191 rootsep += os.sep
196 if cwd.startswith(rootsep):
192 if cwd.startswith(rootsep):
197 return cwd[len(rootsep):]
193 return cwd[len(rootsep):]
198 else:
194 else:
199 # we're outside the repo. return an absolute path.
195 # we're outside the repo. return an absolute path.
200 return cwd
196 return cwd
201
197
202 def pathto(self, f, cwd=None):
198 def pathto(self, f, cwd=None):
203 if cwd is None:
199 if cwd is None:
204 cwd = self.getcwd()
200 cwd = self.getcwd()
205 path = util.pathto(self._root, cwd, f)
201 path = util.pathto(self._root, cwd, f)
206 if self._slash:
202 if self._slash:
207 return util.normpath(path)
203 return util.normpath(path)
208 return path
204 return path
209
205
210 def __getitem__(self, key):
206 def __getitem__(self, key):
211 '''Return the current state of key (a filename) in the dirstate.
207 '''Return the current state of key (a filename) in the dirstate.
212
208
213 States are:
209 States are:
214 n normal
210 n normal
215 m needs merging
211 m needs merging
216 r marked for removal
212 r marked for removal
217 a marked for addition
213 a marked for addition
218 ? not tracked
214 ? not tracked
219 '''
215 '''
220 return self._map.get(key, ("?",))[0]
216 return self._map.get(key, ("?",))[0]
221
217
222 def __contains__(self, key):
218 def __contains__(self, key):
223 return key in self._map
219 return key in self._map
224
220
225 def __iter__(self):
221 def __iter__(self):
226 for x in sorted(self._map):
222 for x in sorted(self._map):
227 yield x
223 yield x
228
224
229 def parents(self):
225 def parents(self):
230 return [self._validate(p) for p in self._pl]
226 return [self._validate(p) for p in self._pl]
231
227
232 def p1(self):
228 def p1(self):
233 return self._validate(self._pl[0])
229 return self._validate(self._pl[0])
234
230
235 def p2(self):
231 def p2(self):
236 return self._validate(self._pl[1])
232 return self._validate(self._pl[1])
237
233
238 def branch(self):
234 def branch(self):
239 return encoding.tolocal(self._branch)
235 return encoding.tolocal(self._branch)
240
236
241 def setparents(self, p1, p2=nullid):
237 def setparents(self, p1, p2=nullid):
242 self._dirty = self._dirtypl = True
238 self._dirty = self._dirtypl = True
243 self._pl = p1, p2
239 self._pl = p1, p2
244
240
245 def setbranch(self, branch):
241 def setbranch(self, branch):
246 if branch in ['tip', '.', 'null']:
242 if branch in ['tip', '.', 'null']:
247 raise util.Abort(_('the name \'%s\' is reserved') % branch)
243 raise util.Abort(_('the name \'%s\' is reserved') % branch)
248 self._branch = encoding.fromlocal(branch)
244 self._branch = encoding.fromlocal(branch)
249 self._opener.write("branch", self._branch + '\n')
245 self._opener.write("branch", self._branch + '\n')
250
246
251 def _read(self):
247 def _read(self):
252 self._map = {}
248 self._map = {}
253 self._copymap = {}
249 self._copymap = {}
254 try:
250 try:
255 st = self._opener.read("dirstate")
251 st = self._opener.read("dirstate")
256 except IOError, err:
252 except IOError, err:
257 if err.errno != errno.ENOENT:
253 if err.errno != errno.ENOENT:
258 raise
254 raise
259 return
255 return
260 if not st:
256 if not st:
261 return
257 return
262
258
263 p = parsers.parse_dirstate(self._map, self._copymap, st)
259 p = parsers.parse_dirstate(self._map, self._copymap, st)
264 if not self._dirtypl:
260 if not self._dirtypl:
265 self._pl = p
261 self._pl = p
266
262
267 def invalidate(self):
263 def invalidate(self):
268 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
264 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
269 "_ignore"):
265 "_ignore"):
270 if a in self.__dict__:
266 if a in self.__dict__:
271 delattr(self, a)
267 delattr(self, a)
272 self._lastnormaltime = 0
268 self._lastnormaltime = 0
273 self._dirty = False
269 self._dirty = False
274
270
275 def copy(self, source, dest):
271 def copy(self, source, dest):
276 """Mark dest as a copy of source. Unmark dest if source is None."""
272 """Mark dest as a copy of source. Unmark dest if source is None."""
277 if source == dest:
273 if source == dest:
278 return
274 return
279 self._dirty = True
275 self._dirty = True
280 if source is not None:
276 if source is not None:
281 self._copymap[dest] = source
277 self._copymap[dest] = source
282 elif dest in self._copymap:
278 elif dest in self._copymap:
283 del self._copymap[dest]
279 del self._copymap[dest]
284
280
285 def copied(self, file):
281 def copied(self, file):
286 return self._copymap.get(file, None)
282 return self._copymap.get(file, None)
287
283
288 def copies(self):
284 def copies(self):
289 return self._copymap
285 return self._copymap
290
286
291 def _droppath(self, f):
287 def _droppath(self, f):
292 if self[f] not in "?r" and "_dirs" in self.__dict__:
288 if self[f] not in "?r" and "_dirs" in self.__dict__:
293 _decdirs(self._dirs, f)
289 _decdirs(self._dirs, f)
294
290
295 def _addpath(self, f, check=False):
291 def _addpath(self, f, check=False):
296 oldstate = self[f]
292 oldstate = self[f]
297 if check or oldstate == "r":
293 if check or oldstate == "r":
298 scmutil.checkfilename(f)
294 scmutil.checkfilename(f)
299 if f in self._dirs:
295 if f in self._dirs:
300 raise util.Abort(_('directory %r already in dirstate') % f)
296 raise util.Abort(_('directory %r already in dirstate') % f)
301 # shadows
297 # shadows
302 for d in _finddirs(f):
298 for d in _finddirs(f):
303 if d in self._dirs:
299 if d in self._dirs:
304 break
300 break
305 if d in self._map and self[d] != 'r':
301 if d in self._map and self[d] != 'r':
306 raise util.Abort(
302 raise util.Abort(
307 _('file %r in dirstate clashes with %r') % (d, f))
303 _('file %r in dirstate clashes with %r') % (d, f))
308 if oldstate in "?r" and "_dirs" in self.__dict__:
304 if oldstate in "?r" and "_dirs" in self.__dict__:
309 _incdirs(self._dirs, f)
305 _incdirs(self._dirs, f)
310
306
311 def normal(self, f):
307 def normal(self, f):
312 '''Mark a file normal and clean.'''
308 '''Mark a file normal and clean.'''
313 self._dirty = True
309 self._dirty = True
314 self._addpath(f)
310 self._addpath(f)
315 s = os.lstat(self._join(f))
311 s = os.lstat(self._join(f))
316 mtime = int(s.st_mtime)
312 mtime = int(s.st_mtime)
317 self._map[f] = ('n', s.st_mode, s.st_size, mtime)
313 self._map[f] = ('n', s.st_mode, s.st_size, mtime)
318 if f in self._copymap:
314 if f in self._copymap:
319 del self._copymap[f]
315 del self._copymap[f]
320 if mtime > self._lastnormaltime:
316 if mtime > self._lastnormaltime:
321 # Remember the most recent modification timeslot for status(),
317 # Remember the most recent modification timeslot for status(),
322 # to make sure we won't miss future size-preserving file content
318 # to make sure we won't miss future size-preserving file content
323 # modifications that happen within the same timeslot.
319 # modifications that happen within the same timeslot.
324 self._lastnormaltime = mtime
320 self._lastnormaltime = mtime
325
321
326 def normallookup(self, f):
322 def normallookup(self, f):
327 '''Mark a file normal, but possibly dirty.'''
323 '''Mark a file normal, but possibly dirty.'''
328 if self._pl[1] != nullid and f in self._map:
324 if self._pl[1] != nullid and f in self._map:
329 # if there is a merge going on and the file was either
325 # if there is a merge going on and the file was either
330 # in state 'm' (-1) or coming from other parent (-2) before
326 # in state 'm' (-1) or coming from other parent (-2) before
331 # being removed, restore that state.
327 # being removed, restore that state.
332 entry = self._map[f]
328 entry = self._map[f]
333 if entry[0] == 'r' and entry[2] in (-1, -2):
329 if entry[0] == 'r' and entry[2] in (-1, -2):
334 source = self._copymap.get(f)
330 source = self._copymap.get(f)
335 if entry[2] == -1:
331 if entry[2] == -1:
336 self.merge(f)
332 self.merge(f)
337 elif entry[2] == -2:
333 elif entry[2] == -2:
338 self.otherparent(f)
334 self.otherparent(f)
339 if source:
335 if source:
340 self.copy(source, f)
336 self.copy(source, f)
341 return
337 return
342 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
338 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
343 return
339 return
344 self._dirty = True
340 self._dirty = True
345 self._addpath(f)
341 self._addpath(f)
346 self._map[f] = ('n', 0, -1, -1)
342 self._map[f] = ('n', 0, -1, -1)
347 if f in self._copymap:
343 if f in self._copymap:
348 del self._copymap[f]
344 del self._copymap[f]
349
345
350 def otherparent(self, f):
346 def otherparent(self, f):
351 '''Mark as coming from the other parent, always dirty.'''
347 '''Mark as coming from the other parent, always dirty.'''
352 if self._pl[1] == nullid:
348 if self._pl[1] == nullid:
353 raise util.Abort(_("setting %r to other parent "
349 raise util.Abort(_("setting %r to other parent "
354 "only allowed in merges") % f)
350 "only allowed in merges") % f)
355 self._dirty = True
351 self._dirty = True
356 self._addpath(f)
352 self._addpath(f)
357 self._map[f] = ('n', 0, -2, -1)
353 self._map[f] = ('n', 0, -2, -1)
358 if f in self._copymap:
354 if f in self._copymap:
359 del self._copymap[f]
355 del self._copymap[f]
360
356
361 def add(self, f):
357 def add(self, f):
362 '''Mark a file added.'''
358 '''Mark a file added.'''
363 self._dirty = True
359 self._dirty = True
364 self._addpath(f, True)
360 self._addpath(f, True)
365 self._map[f] = ('a', 0, -1, -1)
361 self._map[f] = ('a', 0, -1, -1)
366 if f in self._copymap:
362 if f in self._copymap:
367 del self._copymap[f]
363 del self._copymap[f]
368
364
369 def remove(self, f):
365 def remove(self, f):
370 '''Mark a file removed.'''
366 '''Mark a file removed.'''
371 self._dirty = True
367 self._dirty = True
372 self._droppath(f)
368 self._droppath(f)
373 size = 0
369 size = 0
374 if self._pl[1] != nullid and f in self._map:
370 if self._pl[1] != nullid and f in self._map:
375 # backup the previous state
371 # backup the previous state
376 entry = self._map[f]
372 entry = self._map[f]
377 if entry[0] == 'm': # merge
373 if entry[0] == 'm': # merge
378 size = -1
374 size = -1
379 elif entry[0] == 'n' and entry[2] == -2: # other parent
375 elif entry[0] == 'n' and entry[2] == -2: # other parent
380 size = -2
376 size = -2
381 self._map[f] = ('r', 0, size, 0)
377 self._map[f] = ('r', 0, size, 0)
382 if size == 0 and f in self._copymap:
378 if size == 0 and f in self._copymap:
383 del self._copymap[f]
379 del self._copymap[f]
384
380
385 def merge(self, f):
381 def merge(self, f):
386 '''Mark a file merged.'''
382 '''Mark a file merged.'''
387 self._dirty = True
383 self._dirty = True
388 s = os.lstat(self._join(f))
384 s = os.lstat(self._join(f))
389 self._addpath(f)
385 self._addpath(f)
390 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
386 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
391 if f in self._copymap:
387 if f in self._copymap:
392 del self._copymap[f]
388 del self._copymap[f]
393
389
394 def drop(self, f):
390 def drop(self, f):
395 '''Drop a file from the dirstate'''
391 '''Drop a file from the dirstate'''
396 if f in self._map:
392 if f in self._map:
397 self._dirty = True
393 self._dirty = True
398 self._droppath(f)
394 self._droppath(f)
399 del self._map[f]
395 del self._map[f]
400
396
401 def _normalize(self, path, isknown):
397 def _normalize(self, path, isknown):
402 normed = util.normcase(path)
398 normed = util.normcase(path)
403 folded = self._foldmap.get(normed, None)
399 folded = self._foldmap.get(normed, None)
404 if folded is None:
400 if folded is None:
405 if isknown or not os.path.lexists(os.path.join(self._root, path)):
401 if isknown or not os.path.lexists(os.path.join(self._root, path)):
406 folded = path
402 folded = path
407 else:
403 else:
408 folded = self._foldmap.setdefault(normed,
404 folded = self._foldmap.setdefault(normed,
409 util.fspath(normed, self._normroot))
405 util.fspath(normed, self._root))
410 return folded
406 return folded
411
407
412 def normalize(self, path, isknown=False):
408 def normalize(self, path, isknown=False):
413 '''
409 '''
414 normalize the case of a pathname when on a casefolding filesystem
410 normalize the case of a pathname when on a casefolding filesystem
415
411
416 isknown specifies whether the filename came from walking the
412 isknown specifies whether the filename came from walking the
417 disk, to avoid extra filesystem access
413 disk, to avoid extra filesystem access
418
414
419 The normalized case is determined based on the following precedence:
415 The normalized case is determined based on the following precedence:
420
416
421 - version of name already stored in the dirstate
417 - version of name already stored in the dirstate
422 - version of name stored on disk
418 - version of name stored on disk
423 - version provided via command arguments
419 - version provided via command arguments
424 '''
420 '''
425
421
426 if self._checkcase:
422 if self._checkcase:
427 return self._normalize(path, isknown)
423 return self._normalize(path, isknown)
428 return path
424 return path
429
425
430 def clear(self):
426 def clear(self):
431 self._map = {}
427 self._map = {}
432 if "_dirs" in self.__dict__:
428 if "_dirs" in self.__dict__:
433 delattr(self, "_dirs")
429 delattr(self, "_dirs")
434 self._copymap = {}
430 self._copymap = {}
435 self._pl = [nullid, nullid]
431 self._pl = [nullid, nullid]
436 self._lastnormaltime = 0
432 self._lastnormaltime = 0
437 self._dirty = True
433 self._dirty = True
438
434
439 def rebuild(self, parent, files):
435 def rebuild(self, parent, files):
440 self.clear()
436 self.clear()
441 for f in files:
437 for f in files:
442 if 'x' in files.flags(f):
438 if 'x' in files.flags(f):
443 self._map[f] = ('n', 0777, -1, 0)
439 self._map[f] = ('n', 0777, -1, 0)
444 else:
440 else:
445 self._map[f] = ('n', 0666, -1, 0)
441 self._map[f] = ('n', 0666, -1, 0)
446 self._pl = (parent, nullid)
442 self._pl = (parent, nullid)
447 self._dirty = True
443 self._dirty = True
448
444
449 def write(self):
445 def write(self):
450 if not self._dirty:
446 if not self._dirty:
451 return
447 return
452 st = self._opener("dirstate", "w", atomictemp=True)
448 st = self._opener("dirstate", "w", atomictemp=True)
453
449
454 # use the modification time of the newly created temporary file as the
450 # use the modification time of the newly created temporary file as the
455 # filesystem's notion of 'now'
451 # filesystem's notion of 'now'
456 now = int(util.fstat(st).st_mtime)
452 now = int(util.fstat(st).st_mtime)
457
453
458 cs = cStringIO.StringIO()
454 cs = cStringIO.StringIO()
459 copymap = self._copymap
455 copymap = self._copymap
460 pack = struct.pack
456 pack = struct.pack
461 write = cs.write
457 write = cs.write
462 write("".join(self._pl))
458 write("".join(self._pl))
463 for f, e in self._map.iteritems():
459 for f, e in self._map.iteritems():
464 if e[0] == 'n' and e[3] == now:
460 if e[0] == 'n' and e[3] == now:
465 # The file was last modified "simultaneously" with the current
461 # The file was last modified "simultaneously" with the current
466 # write to dirstate (i.e. within the same second for file-
462 # write to dirstate (i.e. within the same second for file-
467 # systems with a granularity of 1 sec). This commonly happens
463 # systems with a granularity of 1 sec). This commonly happens
468 # for at least a couple of files on 'update'.
464 # for at least a couple of files on 'update'.
469 # The user could change the file without changing its size
465 # The user could change the file without changing its size
470 # within the same second. Invalidate the file's stat data in
466 # within the same second. Invalidate the file's stat data in
471 # dirstate, forcing future 'status' calls to compare the
467 # dirstate, forcing future 'status' calls to compare the
472 # contents of the file. This prevents mistakenly treating such
468 # contents of the file. This prevents mistakenly treating such
473 # files as clean.
469 # files as clean.
474 e = (e[0], 0, -1, -1) # mark entry as 'unset'
470 e = (e[0], 0, -1, -1) # mark entry as 'unset'
475 self._map[f] = e
471 self._map[f] = e
476
472
477 if f in copymap:
473 if f in copymap:
478 f = "%s\0%s" % (f, copymap[f])
474 f = "%s\0%s" % (f, copymap[f])
479 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
475 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
480 write(e)
476 write(e)
481 write(f)
477 write(f)
482 st.write(cs.getvalue())
478 st.write(cs.getvalue())
483 st.close()
479 st.close()
484 self._lastnormaltime = 0
480 self._lastnormaltime = 0
485 self._dirty = self._dirtypl = False
481 self._dirty = self._dirtypl = False
486
482
487 def _dirignore(self, f):
483 def _dirignore(self, f):
488 if f == '.':
484 if f == '.':
489 return False
485 return False
490 if self._ignore(f):
486 if self._ignore(f):
491 return True
487 return True
492 for p in _finddirs(f):
488 for p in _finddirs(f):
493 if self._ignore(p):
489 if self._ignore(p):
494 return True
490 return True
495 return False
491 return False
496
492
497 def walk(self, match, subrepos, unknown, ignored):
493 def walk(self, match, subrepos, unknown, ignored):
498 '''
494 '''
499 Walk recursively through the directory tree, finding all files
495 Walk recursively through the directory tree, finding all files
500 matched by match.
496 matched by match.
501
497
502 Return a dict mapping filename to stat-like object (either
498 Return a dict mapping filename to stat-like object (either
503 mercurial.osutil.stat instance or return value of os.stat()).
499 mercurial.osutil.stat instance or return value of os.stat()).
504 '''
500 '''
505
501
506 def fwarn(f, msg):
502 def fwarn(f, msg):
507 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
503 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
508 return False
504 return False
509
505
510 def badtype(mode):
506 def badtype(mode):
511 kind = _('unknown')
507 kind = _('unknown')
512 if stat.S_ISCHR(mode):
508 if stat.S_ISCHR(mode):
513 kind = _('character device')
509 kind = _('character device')
514 elif stat.S_ISBLK(mode):
510 elif stat.S_ISBLK(mode):
515 kind = _('block device')
511 kind = _('block device')
516 elif stat.S_ISFIFO(mode):
512 elif stat.S_ISFIFO(mode):
517 kind = _('fifo')
513 kind = _('fifo')
518 elif stat.S_ISSOCK(mode):
514 elif stat.S_ISSOCK(mode):
519 kind = _('socket')
515 kind = _('socket')
520 elif stat.S_ISDIR(mode):
516 elif stat.S_ISDIR(mode):
521 kind = _('directory')
517 kind = _('directory')
522 return _('unsupported file type (type is %s)') % kind
518 return _('unsupported file type (type is %s)') % kind
523
519
524 ignore = self._ignore
520 ignore = self._ignore
525 dirignore = self._dirignore
521 dirignore = self._dirignore
526 if ignored:
522 if ignored:
527 ignore = util.never
523 ignore = util.never
528 dirignore = util.never
524 dirignore = util.never
529 elif not unknown:
525 elif not unknown:
530 # if unknown and ignored are False, skip step 2
526 # if unknown and ignored are False, skip step 2
531 ignore = util.always
527 ignore = util.always
532 dirignore = util.always
528 dirignore = util.always
533
529
534 matchfn = match.matchfn
530 matchfn = match.matchfn
535 badfn = match.bad
531 badfn = match.bad
536 dmap = self._map
532 dmap = self._map
537 normpath = util.normpath
533 normpath = util.normpath
538 listdir = osutil.listdir
534 listdir = osutil.listdir
539 lstat = os.lstat
535 lstat = os.lstat
540 getkind = stat.S_IFMT
536 getkind = stat.S_IFMT
541 dirkind = stat.S_IFDIR
537 dirkind = stat.S_IFDIR
542 regkind = stat.S_IFREG
538 regkind = stat.S_IFREG
543 lnkkind = stat.S_IFLNK
539 lnkkind = stat.S_IFLNK
544 join = self._join
540 join = self._join
545 work = []
541 work = []
546 wadd = work.append
542 wadd = work.append
547
543
548 exact = skipstep3 = False
544 exact = skipstep3 = False
549 if matchfn == match.exact: # match.exact
545 if matchfn == match.exact: # match.exact
550 exact = True
546 exact = True
551 dirignore = util.always # skip step 2
547 dirignore = util.always # skip step 2
552 elif match.files() and not match.anypats(): # match.match, no patterns
548 elif match.files() and not match.anypats(): # match.match, no patterns
553 skipstep3 = True
549 skipstep3 = True
554
550
555 if self._checkcase:
551 if self._checkcase:
556 normalize = self._normalize
552 normalize = self._normalize
557 skipstep3 = False
553 skipstep3 = False
558 else:
554 else:
559 normalize = lambda x, y: x
555 normalize = lambda x, y: x
560
556
561 files = sorted(match.files())
557 files = sorted(match.files())
562 subrepos.sort()
558 subrepos.sort()
563 i, j = 0, 0
559 i, j = 0, 0
564 while i < len(files) and j < len(subrepos):
560 while i < len(files) and j < len(subrepos):
565 subpath = subrepos[j] + "/"
561 subpath = subrepos[j] + "/"
566 if files[i] < subpath:
562 if files[i] < subpath:
567 i += 1
563 i += 1
568 continue
564 continue
569 while i < len(files) and files[i].startswith(subpath):
565 while i < len(files) and files[i].startswith(subpath):
570 del files[i]
566 del files[i]
571 j += 1
567 j += 1
572
568
573 if not files or '.' in files:
569 if not files or '.' in files:
574 files = ['']
570 files = ['']
575 results = dict.fromkeys(subrepos)
571 results = dict.fromkeys(subrepos)
576 results['.hg'] = None
572 results['.hg'] = None
577
573
578 # step 1: find all explicit files
574 # step 1: find all explicit files
579 for ff in files:
575 for ff in files:
580 nf = normalize(normpath(ff), False)
576 nf = normalize(normpath(ff), False)
581 if nf in results:
577 if nf in results:
582 continue
578 continue
583
579
584 try:
580 try:
585 st = lstat(join(nf))
581 st = lstat(join(nf))
586 kind = getkind(st.st_mode)
582 kind = getkind(st.st_mode)
587 if kind == dirkind:
583 if kind == dirkind:
588 skipstep3 = False
584 skipstep3 = False
589 if nf in dmap:
585 if nf in dmap:
590 #file deleted on disk but still in dirstate
586 #file deleted on disk but still in dirstate
591 results[nf] = None
587 results[nf] = None
592 match.dir(nf)
588 match.dir(nf)
593 if not dirignore(nf):
589 if not dirignore(nf):
594 wadd(nf)
590 wadd(nf)
595 elif kind == regkind or kind == lnkkind:
591 elif kind == regkind or kind == lnkkind:
596 results[nf] = st
592 results[nf] = st
597 else:
593 else:
598 badfn(ff, badtype(kind))
594 badfn(ff, badtype(kind))
599 if nf in dmap:
595 if nf in dmap:
600 results[nf] = None
596 results[nf] = None
601 except OSError, inst:
597 except OSError, inst:
602 if nf in dmap: # does it exactly match a file?
598 if nf in dmap: # does it exactly match a file?
603 results[nf] = None
599 results[nf] = None
604 else: # does it match a directory?
600 else: # does it match a directory?
605 prefix = nf + "/"
601 prefix = nf + "/"
606 for fn in dmap:
602 for fn in dmap:
607 if fn.startswith(prefix):
603 if fn.startswith(prefix):
608 match.dir(nf)
604 match.dir(nf)
609 skipstep3 = False
605 skipstep3 = False
610 break
606 break
611 else:
607 else:
612 badfn(ff, inst.strerror)
608 badfn(ff, inst.strerror)
613
609
614 # step 2: visit subdirectories
610 # step 2: visit subdirectories
615 while work:
611 while work:
616 nd = work.pop()
612 nd = work.pop()
617 skip = None
613 skip = None
618 if nd == '.':
614 if nd == '.':
619 nd = ''
615 nd = ''
620 else:
616 else:
621 skip = '.hg'
617 skip = '.hg'
622 try:
618 try:
623 entries = listdir(join(nd), stat=True, skip=skip)
619 entries = listdir(join(nd), stat=True, skip=skip)
624 except OSError, inst:
620 except OSError, inst:
625 if inst.errno == errno.EACCES:
621 if inst.errno == errno.EACCES:
626 fwarn(nd, inst.strerror)
622 fwarn(nd, inst.strerror)
627 continue
623 continue
628 raise
624 raise
629 for f, kind, st in entries:
625 for f, kind, st in entries:
630 nf = normalize(nd and (nd + "/" + f) or f, True)
626 nf = normalize(nd and (nd + "/" + f) or f, True)
631 if nf not in results:
627 if nf not in results:
632 if kind == dirkind:
628 if kind == dirkind:
633 if not ignore(nf):
629 if not ignore(nf):
634 match.dir(nf)
630 match.dir(nf)
635 wadd(nf)
631 wadd(nf)
636 if nf in dmap and matchfn(nf):
632 if nf in dmap and matchfn(nf):
637 results[nf] = None
633 results[nf] = None
638 elif kind == regkind or kind == lnkkind:
634 elif kind == regkind or kind == lnkkind:
639 if nf in dmap:
635 if nf in dmap:
640 if matchfn(nf):
636 if matchfn(nf):
641 results[nf] = st
637 results[nf] = st
642 elif matchfn(nf) and not ignore(nf):
638 elif matchfn(nf) and not ignore(nf):
643 results[nf] = st
639 results[nf] = st
644 elif nf in dmap and matchfn(nf):
640 elif nf in dmap and matchfn(nf):
645 results[nf] = None
641 results[nf] = None
646
642
647 # step 3: report unseen items in the dmap hash
643 # step 3: report unseen items in the dmap hash
648 if not skipstep3 and not exact:
644 if not skipstep3 and not exact:
649 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
645 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
650 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
646 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
651 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
647 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
652 st = None
648 st = None
653 results[nf] = st
649 results[nf] = st
654 for s in subrepos:
650 for s in subrepos:
655 del results[s]
651 del results[s]
656 del results['.hg']
652 del results['.hg']
657 return results
653 return results
658
654
659 def status(self, match, subrepos, ignored, clean, unknown):
655 def status(self, match, subrepos, ignored, clean, unknown):
660 '''Determine the status of the working copy relative to the
656 '''Determine the status of the working copy relative to the
661 dirstate and return a tuple of lists (unsure, modified, added,
657 dirstate and return a tuple of lists (unsure, modified, added,
662 removed, deleted, unknown, ignored, clean), where:
658 removed, deleted, unknown, ignored, clean), where:
663
659
664 unsure:
660 unsure:
665 files that might have been modified since the dirstate was
661 files that might have been modified since the dirstate was
666 written, but need to be read to be sure (size is the same
662 written, but need to be read to be sure (size is the same
667 but mtime differs)
663 but mtime differs)
668 modified:
664 modified:
669 files that have definitely been modified since the dirstate
665 files that have definitely been modified since the dirstate
670 was written (different size or mode)
666 was written (different size or mode)
671 added:
667 added:
672 files that have been explicitly added with hg add
668 files that have been explicitly added with hg add
673 removed:
669 removed:
674 files that have been explicitly removed with hg remove
670 files that have been explicitly removed with hg remove
675 deleted:
671 deleted:
676 files that have been deleted through other means ("missing")
672 files that have been deleted through other means ("missing")
677 unknown:
673 unknown:
678 files not in the dirstate that are not ignored
674 files not in the dirstate that are not ignored
679 ignored:
675 ignored:
680 files not in the dirstate that are ignored
676 files not in the dirstate that are ignored
681 (by _dirignore())
677 (by _dirignore())
682 clean:
678 clean:
683 files that have definitely not been modified since the
679 files that have definitely not been modified since the
684 dirstate was written
680 dirstate was written
685 '''
681 '''
686 listignored, listclean, listunknown = ignored, clean, unknown
682 listignored, listclean, listunknown = ignored, clean, unknown
687 lookup, modified, added, unknown, ignored = [], [], [], [], []
683 lookup, modified, added, unknown, ignored = [], [], [], [], []
688 removed, deleted, clean = [], [], []
684 removed, deleted, clean = [], [], []
689
685
690 dmap = self._map
686 dmap = self._map
691 ladd = lookup.append # aka "unsure"
687 ladd = lookup.append # aka "unsure"
692 madd = modified.append
688 madd = modified.append
693 aadd = added.append
689 aadd = added.append
694 uadd = unknown.append
690 uadd = unknown.append
695 iadd = ignored.append
691 iadd = ignored.append
696 radd = removed.append
692 radd = removed.append
697 dadd = deleted.append
693 dadd = deleted.append
698 cadd = clean.append
694 cadd = clean.append
699
695
700 lnkkind = stat.S_IFLNK
696 lnkkind = stat.S_IFLNK
701
697
702 for fn, st in self.walk(match, subrepos, listunknown,
698 for fn, st in self.walk(match, subrepos, listunknown,
703 listignored).iteritems():
699 listignored).iteritems():
704 if fn not in dmap:
700 if fn not in dmap:
705 if (listignored or match.exact(fn)) and self._dirignore(fn):
701 if (listignored or match.exact(fn)) and self._dirignore(fn):
706 if listignored:
702 if listignored:
707 iadd(fn)
703 iadd(fn)
708 elif listunknown:
704 elif listunknown:
709 uadd(fn)
705 uadd(fn)
710 continue
706 continue
711
707
712 state, mode, size, time = dmap[fn]
708 state, mode, size, time = dmap[fn]
713
709
714 if not st and state in "nma":
710 if not st and state in "nma":
715 dadd(fn)
711 dadd(fn)
716 elif state == 'n':
712 elif state == 'n':
717 # The "mode & lnkkind != lnkkind or self._checklink"
713 # The "mode & lnkkind != lnkkind or self._checklink"
718 # lines are an expansion of "islink => checklink"
714 # lines are an expansion of "islink => checklink"
719 # where islink means "is this a link?" and checklink
715 # where islink means "is this a link?" and checklink
720 # means "can we check links?".
716 # means "can we check links?".
721 mtime = int(st.st_mtime)
717 mtime = int(st.st_mtime)
722 if (size >= 0 and
718 if (size >= 0 and
723 (size != st.st_size
719 (size != st.st_size
724 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
720 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
725 and (mode & lnkkind != lnkkind or self._checklink)
721 and (mode & lnkkind != lnkkind or self._checklink)
726 or size == -2 # other parent
722 or size == -2 # other parent
727 or fn in self._copymap):
723 or fn in self._copymap):
728 madd(fn)
724 madd(fn)
729 elif (mtime != time
725 elif (mtime != time
730 and (mode & lnkkind != lnkkind or self._checklink)):
726 and (mode & lnkkind != lnkkind or self._checklink)):
731 ladd(fn)
727 ladd(fn)
732 elif mtime == self._lastnormaltime:
728 elif mtime == self._lastnormaltime:
733 # fn may have been changed in the same timeslot without
729 # fn may have been changed in the same timeslot without
734 # changing its size. This can happen if we quickly do
730 # changing its size. This can happen if we quickly do
735 # multiple commits in a single transaction.
731 # multiple commits in a single transaction.
736 # Force lookup, so we don't miss such a racy file change.
732 # Force lookup, so we don't miss such a racy file change.
737 ladd(fn)
733 ladd(fn)
738 elif listclean:
734 elif listclean:
739 cadd(fn)
735 cadd(fn)
740 elif state == 'm':
736 elif state == 'm':
741 madd(fn)
737 madd(fn)
742 elif state == 'a':
738 elif state == 'a':
743 aadd(fn)
739 aadd(fn)
744 elif state == 'r':
740 elif state == 'r':
745 radd(fn)
741 radd(fn)
746
742
747 return (lookup, modified, added, removed, deleted, unknown, ignored,
743 return (lookup, modified, added, removed, deleted, unknown, ignored,
748 clean)
744 clean)
merge.py
@@ -1,594 +1,594
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, hex, bin
8 from node import nullid, nullrev, hex, bin
9 from i18n import _
9 from i18n import _
10 import scmutil, util, filemerge, copies, subrepo
10 import scmutil, util, filemerge, copies, subrepo
11 import errno, os, shutil
11 import errno, os, shutil
12
12
13 class mergestate(object):
13 class mergestate(object):
14 '''track 3-way merge state of individual files'''
14 '''track 3-way merge state of individual files'''
15 def __init__(self, repo):
15 def __init__(self, repo):
16 self._repo = repo
16 self._repo = repo
17 self._dirty = False
17 self._dirty = False
18 self._read()
18 self._read()
19 def reset(self, node=None):
19 def reset(self, node=None):
20 self._state = {}
20 self._state = {}
21 if node:
21 if node:
22 self._local = node
22 self._local = node
23 shutil.rmtree(self._repo.join("merge"), True)
23 shutil.rmtree(self._repo.join("merge"), True)
24 self._dirty = False
24 self._dirty = False
25 def _read(self):
25 def _read(self):
26 self._state = {}
26 self._state = {}
27 try:
27 try:
28 f = self._repo.opener("merge/state")
28 f = self._repo.opener("merge/state")
29 for i, l in enumerate(f):
29 for i, l in enumerate(f):
30 if i == 0:
30 if i == 0:
31 self._local = bin(l[:-1])
31 self._local = bin(l[:-1])
32 else:
32 else:
33 bits = l[:-1].split("\0")
33 bits = l[:-1].split("\0")
34 self._state[bits[0]] = bits[1:]
34 self._state[bits[0]] = bits[1:]
35 f.close()
35 f.close()
36 except IOError, err:
36 except IOError, err:
37 if err.errno != errno.ENOENT:
37 if err.errno != errno.ENOENT:
38 raise
38 raise
39 self._dirty = False
39 self._dirty = False
40 def commit(self):
40 def commit(self):
41 if self._dirty:
41 if self._dirty:
42 f = self._repo.opener("merge/state", "w")
42 f = self._repo.opener("merge/state", "w")
43 f.write(hex(self._local) + "\n")
43 f.write(hex(self._local) + "\n")
44 for d, v in self._state.iteritems():
44 for d, v in self._state.iteritems():
45 f.write("\0".join([d] + v) + "\n")
45 f.write("\0".join([d] + v) + "\n")
46 f.close()
46 f.close()
47 self._dirty = False
47 self._dirty = False
48 def add(self, fcl, fco, fca, fd, flags):
48 def add(self, fcl, fco, fca, fd, flags):
49 hash = util.sha1(fcl.path()).hexdigest()
49 hash = util.sha1(fcl.path()).hexdigest()
50 self._repo.opener.write("merge/" + hash, fcl.data())
50 self._repo.opener.write("merge/" + hash, fcl.data())
51 self._state[fd] = ['u', hash, fcl.path(), fca.path(),
51 self._state[fd] = ['u', hash, fcl.path(), fca.path(),
52 hex(fca.filenode()), fco.path(), flags]
52 hex(fca.filenode()), fco.path(), flags]
53 self._dirty = True
53 self._dirty = True
54 def __contains__(self, dfile):
54 def __contains__(self, dfile):
55 return dfile in self._state
55 return dfile in self._state
56 def __getitem__(self, dfile):
56 def __getitem__(self, dfile):
57 return self._state[dfile][0]
57 return self._state[dfile][0]
58 def __iter__(self):
58 def __iter__(self):
59 l = self._state.keys()
59 l = self._state.keys()
60 l.sort()
60 l.sort()
61 for f in l:
61 for f in l:
62 yield f
62 yield f
63 def mark(self, dfile, state):
63 def mark(self, dfile, state):
64 self._state[dfile][0] = state
64 self._state[dfile][0] = state
65 self._dirty = True
65 self._dirty = True
66 def resolve(self, dfile, wctx, octx):
66 def resolve(self, dfile, wctx, octx):
67 if self[dfile] == 'r':
67 if self[dfile] == 'r':
68 return 0
68 return 0
69 state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
69 state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
70 f = self._repo.opener("merge/" + hash)
70 f = self._repo.opener("merge/" + hash)
71 self._repo.wwrite(dfile, f.read(), flags)
71 self._repo.wwrite(dfile, f.read(), flags)
72 f.close()
72 f.close()
73 fcd = wctx[dfile]
73 fcd = wctx[dfile]
74 fco = octx[ofile]
74 fco = octx[ofile]
75 fca = self._repo.filectx(afile, fileid=anode)
75 fca = self._repo.filectx(afile, fileid=anode)
76 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
76 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
77 if r is None:
77 if r is None:
78 # no real conflict
78 # no real conflict
79 del self._state[dfile]
79 del self._state[dfile]
80 elif not r:
80 elif not r:
81 self.mark(dfile, 'r')
81 self.mark(dfile, 'r')
82 return r
82 return r
83
83
84 def _checkunknownfile(repo, wctx, mctx, f):
84 def _checkunknownfile(repo, wctx, mctx, f):
85 return (not repo.dirstate._ignore(f)
85 return (not repo.dirstate._ignore(f)
86 and os.path.exists(repo.wjoin(f))
86 and os.path.exists(repo.wjoin(f))
87 and mctx[f].cmp(wctx[f]))
87 and mctx[f].cmp(wctx[f]))
88
88
89 def _checkunknown(repo, wctx, mctx):
89 def _checkunknown(repo, wctx, mctx):
90 "check for collisions between unknown files and files in mctx"
90 "check for collisions between unknown files and files in mctx"
91
91
92 error = False
92 error = False
93 for f in mctx:
93 for f in mctx:
94 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
94 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
95 error = True
95 error = True
96 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
96 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
97 if error:
97 if error:
98 raise util.Abort(_("untracked files in working directory differ "
98 raise util.Abort(_("untracked files in working directory differ "
99 "from files in requested revision"))
99 "from files in requested revision"))
100
100
101 def _checkcollision(mctx, wctx):
101 def _checkcollision(mctx, wctx):
102 "check for case folding collisions in the destination context"
102 "check for case folding collisions in the destination context"
103 folded = {}
103 folded = {}
104 for fn in mctx:
104 for fn in mctx:
105 fold = util.normcase(fn)
105 fold = util.normcase(fn)
106 if fold in folded:
106 if fold in folded:
107 raise util.Abort(_("case-folding collision between %s and %s")
107 raise util.Abort(_("case-folding collision between %s and %s")
108 % (fn, folded[fold]))
108 % (fn, folded[fold]))
109 folded[fold] = fn
109 folded[fold] = fn
110
110
111 if wctx:
111 if wctx:
112 for fn in wctx:
112 for fn in wctx:
113 fold = util.normcase(fn)
113 fold = util.normcase(fn)
114 mfn = folded.get(fold, None)
114 mfn = folded.get(fold, None)
115 if mfn and (mfn != fn):
115 if mfn and (mfn != fn):
116 raise util.Abort(_("case-folding collision between %s and %s")
116 raise util.Abort(_("case-folding collision between %s and %s")
117 % (mfn, fn))
117 % (mfn, fn))
118
118
119 def _forgetremoved(wctx, mctx, branchmerge):
119 def _forgetremoved(wctx, mctx, branchmerge):
120 """
120 """
121 Forget removed files
121 Forget removed files
122
122
123 If we're jumping between revisions (as opposed to merging), and if
123 If we're jumping between revisions (as opposed to merging), and if
124 neither the working directory nor the target rev has the file,
124 neither the working directory nor the target rev has the file,
125 then we need to remove it from the dirstate, to prevent the
125 then we need to remove it from the dirstate, to prevent the
126 dirstate from listing the file when it is no longer in the
126 dirstate from listing the file when it is no longer in the
127 manifest.
127 manifest.
128
128
129 If we're merging, and the other revision has removed a file
129 If we're merging, and the other revision has removed a file
130 that is not present in the working directory, we need to mark it
130 that is not present in the working directory, we need to mark it
131 as removed.
131 as removed.
132 """
132 """
133
133
134 action = []
134 action = []
135 state = branchmerge and 'r' or 'f'
135 state = branchmerge and 'r' or 'f'
136 for f in wctx.deleted():
136 for f in wctx.deleted():
137 if f not in mctx:
137 if f not in mctx:
138 action.append((f, state))
138 action.append((f, state))
139
139
140 if not branchmerge:
140 if not branchmerge:
141 for f in wctx.removed():
141 for f in wctx.removed():
142 if f not in mctx:
142 if f not in mctx:
143 action.append((f, "f"))
143 action.append((f, "f"))
144
144
145 return action
145 return action
146
146
147 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
147 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
148 """
148 """
149 Merge p1 and p2 with ancestor pa and generate merge action list
149 Merge p1 and p2 with ancestor pa and generate merge action list
150
150
151 overwrite = whether we clobber working files
151 overwrite = whether we clobber working files
152 partial = function to filter file lists
152 partial = function to filter file lists
153 """
153 """
154
154
155 def fmerge(f, f2, fa):
155 def fmerge(f, f2, fa):
156 """merge flags"""
156 """merge flags"""
157 a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
157 a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
158 if m == n: # flags agree
158 if m == n: # flags agree
159 return m # unchanged
159 return m # unchanged
160 if m and n and not a: # flags set, don't agree, differ from parent
160 if m and n and not a: # flags set, don't agree, differ from parent
161 r = repo.ui.promptchoice(
161 r = repo.ui.promptchoice(
162 _(" conflicting flags for %s\n"
162 _(" conflicting flags for %s\n"
163 "(n)one, e(x)ec or sym(l)ink?") % f,
163 "(n)one, e(x)ec or sym(l)ink?") % f,
164 (_("&None"), _("E&xec"), _("Sym&link")), 0)
164 (_("&None"), _("E&xec"), _("Sym&link")), 0)
165 if r == 1:
165 if r == 1:
166 return "x" # Exec
166 return "x" # Exec
167 if r == 2:
167 if r == 2:
168 return "l" # Symlink
168 return "l" # Symlink
169 return ""
169 return ""
170 if m and m != a: # changed from a to m
170 if m and m != a: # changed from a to m
171 return m
171 return m
172 if n and n != a: # changed from a to n
172 if n and n != a: # changed from a to n
173 if (n == 'l' or a == 'l') and m1[f] != ma[f]:
173 if (n == 'l' or a == 'l') and m1.get(f) != ma.get(f):
174 # can't automatically merge symlink flag when there
174 # can't automatically merge symlink flag when there
175 # are file-level conflicts here, let filemerge take
175 # are file-level conflicts here, let filemerge take
176 # care of it
176 # care of it
177 return m
177 return m
178 return n
178 return n
179 return '' # flag was cleared
179 return '' # flag was cleared
180
180
181 def act(msg, m, f, *args):
181 def act(msg, m, f, *args):
182 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
182 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
183 action.append((f, m) + args)
183 action.append((f, m) + args)
184
184
185 action, copy = [], {}
185 action, copy = [], {}
186
186
187 if overwrite:
187 if overwrite:
188 pa = p1
188 pa = p1
189 elif pa == p2: # backwards
189 elif pa == p2: # backwards
190 pa = p1.p1()
190 pa = p1.p1()
191 elif pa and repo.ui.configbool("merge", "followcopies", True):
191 elif pa and repo.ui.configbool("merge", "followcopies", True):
192 copy, diverge = copies.mergecopies(repo, p1, p2, pa)
192 copy, diverge = copies.mergecopies(repo, p1, p2, pa)
193 for of, fl in diverge.iteritems():
193 for of, fl in diverge.iteritems():
194 act("divergent renames", "dr", of, fl)
194 act("divergent renames", "dr", of, fl)
195
195
196 repo.ui.note(_("resolving manifests\n"))
196 repo.ui.note(_("resolving manifests\n"))
197 repo.ui.debug(" overwrite: %s, partial: %s\n"
197 repo.ui.debug(" overwrite: %s, partial: %s\n"
198 % (bool(overwrite), bool(partial)))
198 % (bool(overwrite), bool(partial)))
199 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, p1, p2))
199 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, p1, p2))
200
200
201 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
201 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
202 copied = set(copy.values())
202 copied = set(copy.values())
203
203
204 if '.hgsubstate' in m1:
204 if '.hgsubstate' in m1:
205 # check whether sub state is modified
205 # check whether sub state is modified
206 for s in p1.substate:
206 for s in p1.substate:
207 if p1.sub(s).dirty():
207 if p1.sub(s).dirty():
208 m1['.hgsubstate'] += "+"
208 m1['.hgsubstate'] += "+"
209 break
209 break
210
210
211 # Compare manifests
211 # Compare manifests
212 for f, n in m1.iteritems():
212 for f, n in m1.iteritems():
        if partial and not partial(f):
            continue
        if f in m2:
            rflags = fmerge(f, f, f)
            a = ma.get(f, nullid)
            if n == m2[f] or m2[f] == a: # same or local newer
                # is file locally modified or flags need changing?
                # dirstate flags may need to be made current
                if m1.flags(f) != rflags or n[20:]:
                    act("update permissions", "e", f, rflags)
            elif n == a: # remote newer
                act("remote is newer", "g", f, rflags)
            else: # both changed
                act("versions differ", "m", f, f, f, rflags, False)
        elif f in copied: # files we'll deal with on m2 side
            pass
        elif f in copy:
            f2 = copy[f]
            if f2 not in m2: # directory rename
                act("remote renamed directory to " + f2, "d",
                    f, None, f2, m1.flags(f))
            else: # case 2 A,B/B/B or case 4,21 A/B/B
                act("local copied/moved to " + f2, "m",
                    f, f2, f, fmerge(f, f2, f2), False)
        elif f in ma: # clean, a different, no remote
            if n != ma[f]:
                if repo.ui.promptchoice(
                    _(" local changed %s which remote deleted\n"
                      "use (c)hanged version or (d)elete?") % f,
                    (_("&Changed"), _("&Delete")), 0):
                    act("prompt delete", "r", f)
                else:
                    act("prompt keep", "a", f)
            elif n[20:] == "a": # added, no remote
                act("remote deleted", "f", f)
            else:
                act("other deleted", "r", f)

    for f, n in m2.iteritems():
        if partial and not partial(f):
            continue
        if f in m1 or f in copied: # files already visited
            continue
        if f in copy:
            f2 = copy[f]
            if f2 not in m1: # directory rename
                act("local renamed directory to " + f2, "d",
                    None, f, f2, m2.flags(f))
            elif f2 in m2: # rename case 1, A/A,B/A
                act("remote copied to " + f, "m",
                    f2, f, f, fmerge(f2, f, f2), False)
            else: # case 3,20 A/B/A
                act("remote moved to " + f, "m",
                    f2, f, f, fmerge(f2, f, f2), True)
        elif f not in ma:
            if (not overwrite
                and _checkunknownfile(repo, p1, p2, f)):
                rflags = fmerge(f, f, f)
                act("remote differs from untracked local",
                    "m", f, f, f, rflags, False)
            else:
                act("remote created", "g", f, m2.flags(f))
        elif n != ma[f]:
            if repo.ui.promptchoice(
                _("remote changed %s which local deleted\n"
                  "use (c)hanged version or leave (d)eleted?") % f,
                (_("&Changed"), _("&Deleted")), 0) == 0:
                act("prompt recreating", "g", f, m2.flags(f))

    return action
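# The tuples gathered in 'action' above all start with (filename, action
# type); the remaining items vary per type, as applyupdates() and
# recordupdates() below unpack them. A hedged sketch with purely
# illustrative file names:
#
#     ('gone.txt', 'r')                                     # remove locally
#     ('new.txt',  'g', '')                                 # get from remote
#     ('both.txt', 'm', 'both.txt', 'both.txt', '', False)  # three-way merge
#     ('mode.txt', 'e', 'x')                                # flags change only
#     ('old.txt',  'd', None, 'newdir/old.txt', '')         # directory rename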

def actionkey(a):
    return a[1] == 'r' and -1 or 0, a
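# A minimal sketch of what this key does: remove actions ('r') sort ahead of
# every other action type, so deletions happen before new files are written.
# The sample list below is hypothetical.
#
#     >>> sample = [('a', 'g', ''), ('b', 'r'), ('c', 'm', 'c', 'c', '', False)]
#     >>> [a[1] for a in sorted(sample, key=actionkey)]
#     ['r', 'g', 'm']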

def applyupdates(repo, action, wctx, mctx, actx, overwrite):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy
    actx is the context of the common ancestor

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed, unresolved = 0, 0, 0, 0
    ms = mergestate(repo)
    ms.reset(wctx.p1().node())
    moves = []
    action.sort(key=actionkey)

    # prescan for merges
    for a in action:
        f, m = a[:2]
        if m == 'm': # merge
            f2, fd, flags, move = a[2:]
            if f == '.hgsubstate': # merged internally
                continue
            repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd))
            fcl = wctx[f]
            fco = mctx[f2]
            if mctx == actx: # backwards, use working dir parent as ancestor
                if fcl.parents():
                    fca = fcl.p1()
                else:
                    fca = repo.filectx(f, fileid=nullrev)
            else:
                fca = fcl.ancestor(fco, actx)
            if not fca:
                fca = repo.filectx(f, fileid=nullrev)
            ms.add(fcl, fco, fca, fd, flags)
            if f != fd and move:
                moves.append(f)

    audit = scmutil.pathauditor(repo.root)

    # remove renamed files after safely stored
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            audit(f)
            os.unlink(repo.wjoin(f))

    numupdates = len(action)
    for i, a in enumerate(action):
        f, m = a[:2]
        repo.ui.progress(_('updating'), i + 1, item=f, total=numupdates,
                         unit=_('files'))
        if f and f[0] == "/":
            continue
        if m == "r": # remove
            repo.ui.note(_("removing %s\n") % f)
            audit(f)
            if f == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
            try:
                util.unlinkpath(repo.wjoin(f))
            except OSError, inst:
                if inst.errno != errno.ENOENT:
                    repo.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
            removed += 1
        elif m == "m": # merge
            if f == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                                 overwrite)
                continue
            f2, fd, flags, move = a[2:]
            repo.wopener.audit(fd)
            r = ms.resolve(fd, wctx, mctx)
            if r is not None and r > 0:
                unresolved += 1
            else:
                if r is None:
                    updated += 1
                else:
                    merged += 1
            if (move and repo.dirstate.normalize(fd) != f
                and os.path.lexists(repo.wjoin(f))):
                repo.ui.debug("removing %s\n" % f)
                audit(f)
                os.unlink(repo.wjoin(f))
        elif m == "g": # get
            flags = a[2]
            repo.ui.note(_("getting %s\n") % f)
            t = mctx.filectx(f).data()
            repo.wwrite(f, t, flags)
            t = None
            updated += 1
            if f == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
        elif m == "d": # directory rename
            f2, fd, flags = a[2:]
            if f:
                repo.ui.note(_("moving %s to %s\n") % (f, fd))
                audit(f)
                t = wctx.filectx(f).data()
                repo.wwrite(fd, t, flags)
                util.unlinkpath(repo.wjoin(f))
            if f2:
                repo.ui.note(_("getting %s to %s\n") % (f2, fd))
                t = mctx.filectx(f2).data()
                repo.wwrite(fd, t, flags)
            updated += 1
        elif m == "dr": # divergent renames
            fl = a[2]
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)
        elif m == "e": # exec
            flags = a[2]
            repo.wopener.audit(f)
            util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
    ms.commit()
    repo.ui.progress(_('updating'), None, total=numupdates, unit=_('files'))

    return updated, merged, removed, unresolved
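# A hedged reading of the return value (the variable names come from the
# docstring above; the call shown is illustrative, not a prescribed entry
# point):
#
#     updated, merged, removed, unresolved = applyupdates(repo, action,
#                                                         wc, p2, pa, False)
#
# update() below forwards stats[3], the unresolved count, to the 'update'
# hook as its error flag; a non-zero count means some files still carry
# unresolved conflicts.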

def recordupdates(repo, action, branchmerge):
    "record merge actions to the dirstate"

    for a in action:
        f, m = a[:2]
        if m == "r": # remove
            if branchmerge:
                repo.dirstate.remove(f)
            else:
                repo.dirstate.drop(f)
        elif m == "a": # re-add
            if not branchmerge:
                repo.dirstate.add(f)
        elif m == "f": # forget
            repo.dirstate.drop(f)
        elif m == "e": # exec change
            repo.dirstate.normallookup(f)
        elif m == "g": # get
            if branchmerge:
                repo.dirstate.otherparent(f)
            else:
                repo.dirstate.normal(f)
        elif m == "m": # merge
            f2, fd, flag, move = a[2:]
            if branchmerge:
                # We've done a branch merge, mark this file as merged
                # so that we properly record the merger later
                repo.dirstate.merge(fd)
                if f != f2: # copy/rename
                    if move:
                        repo.dirstate.remove(f)
                    if f != fd:
                        repo.dirstate.copy(f, fd)
                    else:
                        repo.dirstate.copy(f2, fd)
            else:
                # We've update-merged a locally modified file, so
                # we set the dirstate to emulate a normal checkout
                # of that file some time in the past. Thus our
                # merge will appear as a normal local file
                # modification.
                if f2 == fd: # file not locally copied/moved
                    repo.dirstate.normallookup(fd)
                if move:
                    repo.dirstate.drop(f)
        elif m == "d": # directory rename
            f2, fd, flag = a[2:]
            if not f2 and f not in repo.dirstate:
                # untracked file moved
                continue
            if branchmerge:
                repo.dirstate.add(fd)
                if f:
                    repo.dirstate.remove(f)
                    repo.dirstate.copy(f, fd)
                if f2:
                    repo.dirstate.copy(f2, fd)
            else:
                repo.dirstate.normal(fd)
                if f:
                    repo.dirstate.drop(f)
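# A hedged walk-through of one merge action, say the hypothetical tuple
# ('a.txt', 'm', 'a.txt', 'a.txt', '', False), through the branches above:
#   - branchmerge is True:  dirstate.merge('a.txt') marks the file as merged
#     so the merge is recorded properly at commit time.
#   - branchmerge is False: f2 == fd, so dirstate.normallookup('a.txt') makes
#     the result look like an ordinary local modification.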

def update(repo, node, branchmerge, force, partial, ancestor=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to, or None if unspecified
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    partial = a function to filter file lists (dirstate not updated)

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear, on the same named
    branch, or on another named branch).

    This logic is tested by test-update-branches.t.

    -c -C dirty rev  |  linear   same   cross
     n  n    n    n  |    ok     (1)      x
     n  n    n    y  |    ok     ok      ok
     n  n    y    *  |   merge   (2)     (2)
     n  y    *    *  |    ---  discard   ---
     y  n    y    *  |    ---    (3)     ---
     y  n    n    *  |    ---    ok      ---
     y  y    *    *  |    ---    (4)     ---

    x = can't happen
    * = don't-care
    1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
    2 = abort: crosses branches (use 'hg merge' to merge or
               use 'hg update -C' to discard changes)
    3 = abort: uncommitted local changes
    4 = incompatible options (checked in commands.py)

    Return the same tuple as applyupdates().
    """

    onode = node
    wlock = repo.wlock()
    try:
        wc = repo[None]
        if node is None:
            # tip of current branch
            try:
                node = repo.branchtags()[wc.branch()]
            except KeyError:
                if wc.branch() == "default": # no default branch!
                    node = repo.lookup("tip") # update to tip
                else:
                    raise util.Abort(_("branch %s not found") % wc.branch())
        overwrite = force and not branchmerge
        pl = wc.parents()
        p1, p2 = pl[0], repo[node]
        if ancestor:
            pa = repo[ancestor]
        else:
            pa = p1.ancestor(p2)

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite and len(pl) > 1:
            raise util.Abort(_("outstanding uncommitted merges"))
        if branchmerge:
            if pa == p2:
                raise util.Abort(_("merging with a working directory ancestor"
                                   " has no effect"))
            elif pa == p1:
                if p1.branch() == p2.branch():
                    raise util.Abort(_("nothing to merge"),
                                     hint=_("use 'hg update' "
                                            "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise util.Abort(_("outstanding uncommitted changes"),
                                 hint=_("use 'hg status' to list changes"))
            if not force:
                _checkunknown(repo, wc, p2)
            for s in wc.substate:
                if wc.sub(s).dirty():
                    raise util.Abort(_("outstanding uncommitted changes in "
                                       "subrepository '%s'") % s)

        elif not overwrite:
            if pa == p1 or pa == p2: # linear
                pass # all good
            elif wc.dirty(missing=True):
                raise util.Abort(_("crosses branches (merge branches or use"
                                   " --clean to discard changes)"))
            elif onode is None:
                raise util.Abort(_("crosses branches (merge branches or update"
                                   " --check to force update)"))
            else:
                # Allow jumping branches if clean and specific rev given
                pa = p1

        ### calculate phase
        action = []
        folding = not util.checkcase(repo.path)
        if folding:
            _checkcollision(p2, branchmerge and p1)
        action += _forgetremoved(wc, p2, branchmerge)
        action += manifestmerge(repo, wc, p2, pa, overwrite, partial)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)

        stats = applyupdates(repo, action, wc, p2, pa, overwrite)

        if not partial:
            repo.dirstate.setparents(fp1, fp2)
            recordupdates(repo, action, branchmerge)
            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())
    finally:
        wlock.release()

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats
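# A minimal usage sketch, assuming a 'repo' object obtained elsewhere (e.g.
# via mercurial.hg.repository); the revision names are hypothetical:
#
#     # behave like a plain 'hg update REV': no branch merge, no force
#     stats = update(repo, repo.lookup('default'), False, False, None)
#     # behave like 'hg merge REV': merge another head into the working copy
#     stats = update(repo, repo.lookup('otherhead'), True, False, None)
#     updated, merged, removed, unresolved = stats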