##// END OF EJS Templates
merge crew and main
Benoit Boissinot -
r18653:17014216 merge default
parent child Browse files
Show More
@@ -1,800 +1,816 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 import errno
7 import errno
8
8
9 from node import nullid
9 from node import nullid
10 from i18n import _
10 from i18n import _
11 import scmutil, util, ignore, osutil, parsers, encoding
11 import scmutil, util, ignore, osutil, parsers, encoding
12 import os, stat, errno
12 import os, stat, errno, gc
13
13
14 propertycache = util.propertycache
14 propertycache = util.propertycache
15 filecache = scmutil.filecache
15 filecache = scmutil.filecache
16 _rangemask = 0x7fffffff
16 _rangemask = 0x7fffffff
17
17
class repocache(filecache):
    """filecache for files in .hg/"""
    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg/ directory
        return obj._opener.join(fname)
22
22
class rootcache(filecache):
    """filecache for files in the repository root"""
    def join(self, obj, fname):
        # resolve fname relative to the working-directory root
        return obj._join(fname)
27
27
28 def _finddirs(path):
28 def _finddirs(path):
29 pos = path.rfind('/')
29 pos = path.rfind('/')
30 while pos != -1:
30 while pos != -1:
31 yield path[:pos]
31 yield path[:pos]
32 pos = path.rfind('/', 0, pos)
32 pos = path.rfind('/', 0, pos)
33
33
def _incdirs(dirs, path):
    '''Add one reference to every ancestor directory of path in dirs.

    Counts are maintained incrementally: once an ancestor is already
    present, all shallower ancestors must be present too, so the walk
    stops early.
    '''
    for parent in _finddirs(path):
        if parent in dirs:
            dirs[parent] += 1
            return
        dirs[parent] = 1
40
40
def _decdirs(dirs, path):
    '''Drop one reference from every ancestor directory of path,
    deleting entries whose count would reach zero.

    Mirror image of _incdirs: once an ancestor keeps a positive count
    the shallower ancestors are left untouched.
    '''
    for parent in _finddirs(path):
        count = dirs[parent]
        if count > 1:
            dirs[parent] = count - 1
            return
        del dirs[parent]
47
47
48 class dirstate(object):
48 class dirstate(object):
49
49
    def __init__(self, opener, ui, root, validate):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        # joining with '' guarantees a trailing separator (see _join)
        self._rootdir = os.path.join(root, '')
        self._dirty = False    # unwritten dirstate changes pending?
        self._dirtypl = False  # parents changed in memory but not on disk?
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
66
66
    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).'''
        # _read() assigns self._map directly, shadowing this property
        self._read()
        return self._map
73
73
    @propertycache
    def _copymap(self):
        # map of dest -> copy source, populated as a side effect of
        # _read() (which assigns self._copymap, shadowing this property)
        self._read()
        return self._copymap
78
78
79 @propertycache
79 @propertycache
80 def _foldmap(self):
80 def _foldmap(self):
81 f = {}
81 f = {}
82 for name in self._map:
82 for name in self._map:
83 f[util.normcase(name)] = name
83 f[util.normcase(name)] = name
84 for name in self._dirs:
84 for name in self._dirs:
85 f[util.normcase(name)] = name
85 f[util.normcase(name)] = name
86 f['.'] = '.' # prevents useless util.fspath() invocation
86 f['.'] = '.' # prevents useless util.fspath() invocation
87 return f
87 return f
88
88
    @repocache('branch')
    def _branch(self):
        # read the current branch from .hg/branch; a missing or empty
        # file means the "default" branch
        try:
            return self._opener.read("branch").strip() or "default"
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            return "default"
97
97
    @propertycache
    def _pl(self):
        # working-directory parents: the first 40 bytes of the dirstate
        # file are two 20-byte binary node ids
        try:
            fp = self._opener("dirstate")
            st = fp.read(40)
            fp.close()
            l = len(st)
            if l == 40:
                return st[:20], st[20:40]
            elif l > 0 and l < 40:
                # a short read means a truncated/corrupt dirstate
                raise util.Abort(_('working directory state appears damaged!'))
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
        # no dirstate file (or an empty one): no parents
        return [nullid, nullid]
113
113
114 @propertycache
114 @propertycache
115 def _dirs(self):
115 def _dirs(self):
116 dirs = {}
116 dirs = {}
117 for f, s in self._map.iteritems():
117 for f, s in self._map.iteritems():
118 if s[0] != 'r':
118 if s[0] != 'r':
119 _incdirs(dirs, f)
119 _incdirs(dirs, f)
120 return dirs
120 return dirs
121
121
    def dirs(self):
        # public accessor for the directory reference-count map
        return self._dirs
124
124
    @rootcache('.hgignore')
    def _ignore(self):
        '''Build the ignore matcher from .hgignore plus any extra files
        configured via ui.ignore / ui.ignore.*.'''
        files = [self._join('.hgignore')]
        for name, path in self._ui.configitems("ui"):
            if name == 'ignore' or name.startswith('ignore.'):
                files.append(util.expandpath(path))
        return ignore.ignore(self._root, files, self._ui.warn)
132
132
    @propertycache
    def _slash(self):
        # True when paths should be shown with '/' even though the
        # native separator differs (ui.slash on e.g. Windows)
        return self._ui.configbool('ui', 'slash') and os.sep != '/'
136
136
    @propertycache
    def _checklink(self):
        # does the filesystem under the repo root support symlinks?
        return util.checklink(self._root)
140
140
    @propertycache
    def _checkexec(self):
        # does the filesystem under the repo root honor the exec bit?
        return util.checkexec(self._root)
144
144
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed via .hg)
        return not util.checkcase(self._join('.hg'))
148
148
    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        # (self._rootdir already ends with a separator)
        return self._rootdir + f
153
153
    def flagfunc(self, buildfallback):
        '''Return a function mapping a tracked path to its flags
        ('l' symlink, 'x' executable, '' neither).

        When the filesystem cannot express links and/or exec bits,
        buildfallback() supplies the missing information (typically
        from the manifest).
        '''
        if self._checklink and self._checkexec:
            # filesystem supports both: answer entirely from disk
            def f(x):
                p = self._join(x)
                if os.path.islink(p):
                    return 'l'
                if util.isexec(p):
                    return 'x'
                return ''
            return f

        fallback = buildfallback()
        if self._checklink:
            # links from disk, exec bit from the fallback
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            # links from the fallback, exec bit from disk
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.isexec(self._join(x)):
                    return 'x'
                return ''
            return f
        else:
            # neither supported: everything comes from the fallback
            return fallback
184
184
    def getcwd(self):
        '''Return the current working directory relative to the
        repository root, or an absolute path when outside the repo.'''
        cwd = os.getcwd()
        if cwd == self._root:
            return ''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += os.sep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep):]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
198
198
    def pathto(self, f, cwd=None):
        '''Return repo-relative path f expressed relative to cwd
        (defaulting to the current directory), honoring ui.slash.'''
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.normpath(path)
        return path
206
206
    def __getitem__(self, key):
        '''Return the current state of key (a filename) in the dirstate.

        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition
        ?  not tracked
        '''
        # untracked files fall back to the one-element tuple ("?",)
        return self._map.get(key, ("?",))[0]
218
218
    def __contains__(self, key):
        # a file is "in" the dirstate iff it has an entry in the map
        return key in self._map
221
221
222 def __iter__(self):
222 def __iter__(self):
223 for x in sorted(self._map):
223 for x in sorted(self._map):
224 yield x
224 yield x
225
225
    def parents(self):
        # both working-directory parents, run through the validator
        return [self._validate(p) for p in self._pl]
228
228
    def p1(self):
        # first working-directory parent
        return self._validate(self._pl[0])
231
231
    def p2(self):
        # second working-directory parent (nullid when not merging)
        return self._validate(self._pl[1])
234
234
    def branch(self):
        # branch name converted to the local encoding for display
        return encoding.tolocal(self._branch)
237
237
    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries a
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        self._dirty = self._dirtypl = True
        oldp2 = self._pl[1]
        self._pl = p1, p2
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            # Discard 'm' markers when moving away from a merge state
            for f, s in self._map.iteritems():
                if s[0] == 'm':
                    # save the copy record before normallookup drops it
                    if f in self._copymap:
                        copies[f] = self._copymap[f]
                    self.normallookup(f)
        return copies
259
259
    def setbranch(self, branch):
        '''Set the working-directory branch, writing .hg/branch
        atomically and refreshing the filecache entry for _branch.'''
        self._branch = encoding.fromlocal(branch)
        f = self._opener('branch', 'w', atomictemp=True)
        try:
            f.write(self._branch + '\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache['_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            # discard the temp file so a failed write leaves no trace
            f.discard()
            raise
275
275
    def _read(self):
        '''Parse the on-disk dirstate file into _map and _copymap,
        and set _pl from it unless the parents were already changed
        in memory (_dirtypl).'''
        self._map = {}
        self._copymap = {}
        try:
            st = self._opener.read("dirstate")
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
            # no dirstate file: leave the maps empty
            return
        if not st:
            return

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            p = parsers.parse_dirstate(self._map, self._copymap, st)
        finally:
            if gcenabled:
                gc.enable()
        if not self._dirtypl:
            self._pl = p
291
307
292 def invalidate(self):
308 def invalidate(self):
293 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
309 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
294 "_ignore"):
310 "_ignore"):
295 if a in self.__dict__:
311 if a in self.__dict__:
296 delattr(self, a)
312 delattr(self, a)
297 self._lastnormaltime = 0
313 self._lastnormaltime = 0
298 self._dirty = False
314 self._dirty = False
299
315
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            # self-copies are meaningless; record nothing
            return
        self._dirty = True
        if source is not None:
            self._copymap[dest] = source
        elif dest in self._copymap:
            del self._copymap[dest]
309
325
310 def copied(self, file):
326 def copied(self, file):
311 return self._copymap.get(file, None)
327 return self._copymap.get(file, None)
312
328
    def copies(self):
        # expose the full dest -> source copy map
        return self._copymap
315
331
    def _droppath(self, f):
        # keep the directory refcounts in sync when a tracked file
        # ('n'/'m'/'a') leaves the dirstate; only if _dirs was built
        if self[f] not in "?r" and "_dirs" in self.__dict__:
            _decdirs(self._dirs, f)
319
335
    def _addpath(self, f, state, mode, size, mtime):
        '''Record (state, mode, size, mtime) for f, validating the name
        and keeping the directory refcounts up to date.'''
        oldstate = self[f]
        if state == 'a' or oldstate == 'r':
            # only newly-tracked names need full validation
            scmutil.checkfilename(f)
            if f in self._dirs:
                raise util.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in _finddirs(f):
                if d in self._dirs:
                    break
                if d in self._map and self[d] != 'r':
                    raise util.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            # f becomes tracked: bump its ancestors' refcounts
            _incdirs(self._dirs, f)
        self._dirty = True
        self._map[f] = (state, mode, size, mtime)
337
353
    def normal(self, f):
        '''Mark a file normal and clean.'''
        s = os.lstat(self._join(f))
        mtime = int(s.st_mtime)
        # size/mtime are masked to fit the 31-bit dirstate fields
        self._addpath(f, 'n', s.st_mode,
                      s.st_size & _rangemask, mtime & _rangemask)
        if f in self._copymap:
            del self._copymap[f]
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
351
367
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid and f in self._map:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map[f]
            if entry[0] == 'r' and entry[2] in (-1, -2):
                source = self._copymap.get(f)
                if entry[2] == -1:
                    self.merge(f)
                elif entry[2] == -2:
                    self.otherparent(f)
                if source:
                    # merge()/otherparent() dropped the record; restore it
                    self.copy(source, f)
                return
            if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                # already in a merge-related state; leave it alone
                return
        # size -1 / mtime -1 force status() to re-examine the file
        self._addpath(f, 'n', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]
373
389
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if self._pl[1] == nullid:
            raise util.Abort(_("setting %r to other parent "
                               "only allowed in merges") % f)
        # size -2 is the "from other parent" marker
        self._addpath(f, 'n', 0, -2, -1)
        if f in self._copymap:
            del self._copymap[f]
382
398
    def add(self, f):
        '''Mark a file added.'''
        self._addpath(f, 'a', 0, -1, -1)
        # an added file cannot also be a copy destination
        if f in self._copymap:
            del self._copymap[f]
388
404
    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        self._droppath(f)
        size = 0
        if self._pl[1] != nullid and f in self._map:
            # backup the previous state
            entry = self._map[f]
            if entry[0] == 'm': # merge
                size = -1
            elif entry[0] == 'n' and entry[2] == -2: # other parent
                size = -2
        self._map[f] = ('r', 0, size, 0)
        # keep the copy record when the removed file carried merge state
        if size == 0 and f in self._copymap:
            del self._copymap[f]
404
420
    def merge(self, f):
        '''Mark a file merged.'''
        if self._pl[1] == nullid:
            # not in a merge: behaves like normallookup
            return self.normallookup(f)
        s = os.lstat(self._join(f))
        self._addpath(f, 'm', s.st_mode,
                      s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
        if f in self._copymap:
            del self._copymap[f]
414
430
    def drop(self, f):
        '''Drop a file from the dirstate'''
        if f in self._map:
            self._dirty = True
            self._droppath(f)
            del self._map[f]
421
437
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        '''Return the canonical-case form of path, consulting (and
        updating) _foldmap; recurses on parent directories as needed.'''
        normed = util.normcase(path)
        folded = self._foldmap.get(normed, None)
        if folded is None:
            if isknown:
                # name came from walking the disk: trust its case
                folded = path
            else:
                if exists is None:
                    exists = os.path.lexists(os.path.join(self._root, path))
                if not exists:
                    # Maybe a path component exists
                    if not ignoremissing and '/' in path:
                        d, f = path.rsplit('/', 1)
                        d = self._normalize(d, isknown, ignoremissing, None)
                        folded = d + "/" + f
                    else:
                        # No path components, preserve original case
                        folded = path
                else:
                    # recursively normalize leading directory components
                    # against dirstate
                    if '/' in normed:
                        d, f = normed.rsplit('/', 1)
                        d = self._normalize(d, isknown, ignoremissing, True)
                        r = self._root + "/" + d
                        folded = d + "/" + util.fspath(f, r)
                    else:
                        folded = util.fspath(normed, self._root)
                    # cache only disk-backed results
                    self._foldmap[normed] = folded

        return folded
453
469
    def normalize(self, path, isknown=False, ignoremissing=False):
        '''
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        '''

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        # case-sensitive filesystem: nothing to do
        return path
475
491
    def clear(self):
        '''Empty the dirstate: no tracked files, no copies, null
        parents; marks the dirstate dirty.'''
        self._map = {}
        if "_dirs" in self.__dict__:
            # discard the cached directory refcounts as well
            delattr(self, "_dirs")
        self._copymap = {}
        self._pl = [nullid, nullid]
        self._lastnormaltime = 0
        self._dirty = True
484
500
    def rebuild(self, parent, files):
        '''Reset the dirstate to track exactly files (a manifest-like
        object with a flags() method), parented at parent.'''
        self.clear()
        for f in files:
            # size -1 forces a content comparison on next status()
            if 'x' in files.flags(f):
                self._map[f] = ('n', 0777, -1, 0)
            else:
                self._map[f] = ('n', 0666, -1, 0)
        self._pl = (parent, nullid)
        self._dirty = True
494
510
    def write(self):
        '''Serialize the dirstate to disk atomically; no-op when there
        is nothing dirty to save.'''
        if not self._dirty:
            return
        st = self._opener("dirstate", "w", atomictemp=True)

        def finish(s):
            st.write(s)
            st.close()
            self._lastnormaltime = 0
            self._dirty = self._dirtypl = False

        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st).st_mtime
        finish(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
510
526
    def _dirignore(self, f):
        '''True if directory f or any of its ancestors is ignored
        (the root '.' is never ignored).'''
        if f == '.':
            return False
        if self._ignore(f):
            return True
        for p in _finddirs(f):
            if self._ignore(p):
                return True
        return False
520
536
521 def walk(self, match, subrepos, unknown, ignored):
537 def walk(self, match, subrepos, unknown, ignored):
522 '''
538 '''
523 Walk recursively through the directory tree, finding all files
539 Walk recursively through the directory tree, finding all files
524 matched by match.
540 matched by match.
525
541
526 Return a dict mapping filename to stat-like object (either
542 Return a dict mapping filename to stat-like object (either
527 mercurial.osutil.stat instance or return value of os.stat()).
543 mercurial.osutil.stat instance or return value of os.stat()).
528 '''
544 '''
529
545
530 def fwarn(f, msg):
546 def fwarn(f, msg):
531 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
547 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
532 return False
548 return False
533
549
534 def badtype(mode):
550 def badtype(mode):
535 kind = _('unknown')
551 kind = _('unknown')
536 if stat.S_ISCHR(mode):
552 if stat.S_ISCHR(mode):
537 kind = _('character device')
553 kind = _('character device')
538 elif stat.S_ISBLK(mode):
554 elif stat.S_ISBLK(mode):
539 kind = _('block device')
555 kind = _('block device')
540 elif stat.S_ISFIFO(mode):
556 elif stat.S_ISFIFO(mode):
541 kind = _('fifo')
557 kind = _('fifo')
542 elif stat.S_ISSOCK(mode):
558 elif stat.S_ISSOCK(mode):
543 kind = _('socket')
559 kind = _('socket')
544 elif stat.S_ISDIR(mode):
560 elif stat.S_ISDIR(mode):
545 kind = _('directory')
561 kind = _('directory')
546 return _('unsupported file type (type is %s)') % kind
562 return _('unsupported file type (type is %s)') % kind
547
563
548 ignore = self._ignore
564 ignore = self._ignore
549 dirignore = self._dirignore
565 dirignore = self._dirignore
550 if ignored:
566 if ignored:
551 ignore = util.never
567 ignore = util.never
552 dirignore = util.never
568 dirignore = util.never
553 elif not unknown:
569 elif not unknown:
554 # if unknown and ignored are False, skip step 2
570 # if unknown and ignored are False, skip step 2
555 ignore = util.always
571 ignore = util.always
556 dirignore = util.always
572 dirignore = util.always
557
573
558 matchfn = match.matchfn
574 matchfn = match.matchfn
559 badfn = match.bad
575 badfn = match.bad
560 dmap = self._map
576 dmap = self._map
561 normpath = util.normpath
577 normpath = util.normpath
562 listdir = osutil.listdir
578 listdir = osutil.listdir
563 lstat = os.lstat
579 lstat = os.lstat
564 getkind = stat.S_IFMT
580 getkind = stat.S_IFMT
565 dirkind = stat.S_IFDIR
581 dirkind = stat.S_IFDIR
566 regkind = stat.S_IFREG
582 regkind = stat.S_IFREG
567 lnkkind = stat.S_IFLNK
583 lnkkind = stat.S_IFLNK
568 join = self._join
584 join = self._join
569 work = []
585 work = []
570 wadd = work.append
586 wadd = work.append
571
587
572 exact = skipstep3 = False
588 exact = skipstep3 = False
573 if matchfn == match.exact: # match.exact
589 if matchfn == match.exact: # match.exact
574 exact = True
590 exact = True
575 dirignore = util.always # skip step 2
591 dirignore = util.always # skip step 2
576 elif match.files() and not match.anypats(): # match.match, no patterns
592 elif match.files() and not match.anypats(): # match.match, no patterns
577 skipstep3 = True
593 skipstep3 = True
578
594
579 if not exact and self._checkcase:
595 if not exact and self._checkcase:
580 normalize = self._normalize
596 normalize = self._normalize
581 skipstep3 = False
597 skipstep3 = False
582 else:
598 else:
583 normalize = None
599 normalize = None
584
600
585 files = sorted(match.files())
601 files = sorted(match.files())
586 subrepos.sort()
602 subrepos.sort()
587 i, j = 0, 0
603 i, j = 0, 0
588 while i < len(files) and j < len(subrepos):
604 while i < len(files) and j < len(subrepos):
589 subpath = subrepos[j] + "/"
605 subpath = subrepos[j] + "/"
590 if files[i] < subpath:
606 if files[i] < subpath:
591 i += 1
607 i += 1
592 continue
608 continue
593 while i < len(files) and files[i].startswith(subpath):
609 while i < len(files) and files[i].startswith(subpath):
594 del files[i]
610 del files[i]
595 j += 1
611 j += 1
596
612
597 if not files or '.' in files:
613 if not files or '.' in files:
598 files = ['']
614 files = ['']
599 results = dict.fromkeys(subrepos)
615 results = dict.fromkeys(subrepos)
600 results['.hg'] = None
616 results['.hg'] = None
601
617
602 # step 1: find all explicit files
618 # step 1: find all explicit files
603 for ff in files:
619 for ff in files:
604 if normalize:
620 if normalize:
605 nf = normalize(normpath(ff), False, True)
621 nf = normalize(normpath(ff), False, True)
606 else:
622 else:
607 nf = normpath(ff)
623 nf = normpath(ff)
608 if nf in results:
624 if nf in results:
609 continue
625 continue
610
626
611 try:
627 try:
612 st = lstat(join(nf))
628 st = lstat(join(nf))
613 kind = getkind(st.st_mode)
629 kind = getkind(st.st_mode)
614 if kind == dirkind:
630 if kind == dirkind:
615 skipstep3 = False
631 skipstep3 = False
616 if nf in dmap:
632 if nf in dmap:
617 #file deleted on disk but still in dirstate
633 #file deleted on disk but still in dirstate
618 results[nf] = None
634 results[nf] = None
619 match.dir(nf)
635 match.dir(nf)
620 if not dirignore(nf):
636 if not dirignore(nf):
621 wadd(nf)
637 wadd(nf)
622 elif kind == regkind or kind == lnkkind:
638 elif kind == regkind or kind == lnkkind:
623 results[nf] = st
639 results[nf] = st
624 else:
640 else:
625 badfn(ff, badtype(kind))
641 badfn(ff, badtype(kind))
626 if nf in dmap:
642 if nf in dmap:
627 results[nf] = None
643 results[nf] = None
628 except OSError, inst:
644 except OSError, inst:
629 if nf in dmap: # does it exactly match a file?
645 if nf in dmap: # does it exactly match a file?
630 results[nf] = None
646 results[nf] = None
631 else: # does it match a directory?
647 else: # does it match a directory?
632 prefix = nf + "/"
648 prefix = nf + "/"
633 for fn in dmap:
649 for fn in dmap:
634 if fn.startswith(prefix):
650 if fn.startswith(prefix):
635 match.dir(nf)
651 match.dir(nf)
636 skipstep3 = False
652 skipstep3 = False
637 break
653 break
638 else:
654 else:
639 badfn(ff, inst.strerror)
655 badfn(ff, inst.strerror)
640
656
641 # step 2: visit subdirectories
657 # step 2: visit subdirectories
642 while work:
658 while work:
643 nd = work.pop()
659 nd = work.pop()
644 skip = None
660 skip = None
645 if nd == '.':
661 if nd == '.':
646 nd = ''
662 nd = ''
647 else:
663 else:
648 skip = '.hg'
664 skip = '.hg'
649 try:
665 try:
650 entries = listdir(join(nd), stat=True, skip=skip)
666 entries = listdir(join(nd), stat=True, skip=skip)
651 except OSError, inst:
667 except OSError, inst:
652 if inst.errno in (errno.EACCES, errno.ENOENT):
668 if inst.errno in (errno.EACCES, errno.ENOENT):
653 fwarn(nd, inst.strerror)
669 fwarn(nd, inst.strerror)
654 continue
670 continue
655 raise
671 raise
656 for f, kind, st in entries:
672 for f, kind, st in entries:
657 if normalize:
673 if normalize:
658 nf = normalize(nd and (nd + "/" + f) or f, True, True)
674 nf = normalize(nd and (nd + "/" + f) or f, True, True)
659 else:
675 else:
660 nf = nd and (nd + "/" + f) or f
676 nf = nd and (nd + "/" + f) or f
661 if nf not in results:
677 if nf not in results:
662 if kind == dirkind:
678 if kind == dirkind:
663 if not ignore(nf):
679 if not ignore(nf):
664 match.dir(nf)
680 match.dir(nf)
665 wadd(nf)
681 wadd(nf)
666 if nf in dmap and matchfn(nf):
682 if nf in dmap and matchfn(nf):
667 results[nf] = None
683 results[nf] = None
668 elif kind == regkind or kind == lnkkind:
684 elif kind == regkind or kind == lnkkind:
669 if nf in dmap:
685 if nf in dmap:
670 if matchfn(nf):
686 if matchfn(nf):
671 results[nf] = st
687 results[nf] = st
672 elif matchfn(nf) and not ignore(nf):
688 elif matchfn(nf) and not ignore(nf):
673 results[nf] = st
689 results[nf] = st
674 elif nf in dmap and matchfn(nf):
690 elif nf in dmap and matchfn(nf):
675 results[nf] = None
691 results[nf] = None
676
692
677 # step 3: report unseen items in the dmap hash
693 # step 3: report unseen items in the dmap hash
678 if not skipstep3 and not exact:
694 if not skipstep3 and not exact:
679 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
695 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
680 if unknown:
696 if unknown:
681 # unknown == True means we walked the full directory tree above.
697 # unknown == True means we walked the full directory tree above.
682 # So if a file is not seen it was either a) not matching matchfn
698 # So if a file is not seen it was either a) not matching matchfn
683 # b) ignored, c) missing, or d) under a symlink directory.
699 # b) ignored, c) missing, or d) under a symlink directory.
684 audit_path = scmutil.pathauditor(self._root)
700 audit_path = scmutil.pathauditor(self._root)
685
701
686 for nf in iter(visit):
702 for nf in iter(visit):
687 # Report ignored items in the dmap as long as they are not
703 # Report ignored items in the dmap as long as they are not
688 # under a symlink directory.
704 # under a symlink directory.
689 if ignore(nf) and audit_path.check(nf):
705 if ignore(nf) and audit_path.check(nf):
690 results[nf] = util.statfiles([join(nf)])[0]
706 results[nf] = util.statfiles([join(nf)])[0]
691 else:
707 else:
692 # It's either missing or under a symlink directory
708 # It's either missing or under a symlink directory
693 results[nf] = None
709 results[nf] = None
694 else:
710 else:
695 # We may not have walked the full directory tree above,
711 # We may not have walked the full directory tree above,
696 # so stat everything we missed.
712 # so stat everything we missed.
697 nf = iter(visit).next
713 nf = iter(visit).next
698 for st in util.statfiles([join(i) for i in visit]):
714 for st in util.statfiles([join(i) for i in visit]):
699 results[nf()] = st
715 results[nf()] = st
700 for s in subrepos:
716 for s in subrepos:
701 del results[s]
717 del results[s]
702 del results['.hg']
718 del results['.hg']
703 return results
719 return results
704
720
705 def status(self, match, subrepos, ignored, clean, unknown):
721 def status(self, match, subrepos, ignored, clean, unknown):
706 '''Determine the status of the working copy relative to the
722 '''Determine the status of the working copy relative to the
707 dirstate and return a tuple of lists (unsure, modified, added,
723 dirstate and return a tuple of lists (unsure, modified, added,
708 removed, deleted, unknown, ignored, clean), where:
724 removed, deleted, unknown, ignored, clean), where:
709
725
710 unsure:
726 unsure:
711 files that might have been modified since the dirstate was
727 files that might have been modified since the dirstate was
712 written, but need to be read to be sure (size is the same
728 written, but need to be read to be sure (size is the same
713 but mtime differs)
729 but mtime differs)
714 modified:
730 modified:
715 files that have definitely been modified since the dirstate
731 files that have definitely been modified since the dirstate
716 was written (different size or mode)
732 was written (different size or mode)
717 added:
733 added:
718 files that have been explicitly added with hg add
734 files that have been explicitly added with hg add
719 removed:
735 removed:
720 files that have been explicitly removed with hg remove
736 files that have been explicitly removed with hg remove
721 deleted:
737 deleted:
722 files that have been deleted through other means ("missing")
738 files that have been deleted through other means ("missing")
723 unknown:
739 unknown:
724 files not in the dirstate that are not ignored
740 files not in the dirstate that are not ignored
725 ignored:
741 ignored:
726 files not in the dirstate that are ignored
742 files not in the dirstate that are ignored
727 (by _dirignore())
743 (by _dirignore())
728 clean:
744 clean:
729 files that have definitely not been modified since the
745 files that have definitely not been modified since the
730 dirstate was written
746 dirstate was written
731 '''
747 '''
732 listignored, listclean, listunknown = ignored, clean, unknown
748 listignored, listclean, listunknown = ignored, clean, unknown
733 lookup, modified, added, unknown, ignored = [], [], [], [], []
749 lookup, modified, added, unknown, ignored = [], [], [], [], []
734 removed, deleted, clean = [], [], []
750 removed, deleted, clean = [], [], []
735
751
736 dmap = self._map
752 dmap = self._map
737 ladd = lookup.append # aka "unsure"
753 ladd = lookup.append # aka "unsure"
738 madd = modified.append
754 madd = modified.append
739 aadd = added.append
755 aadd = added.append
740 uadd = unknown.append
756 uadd = unknown.append
741 iadd = ignored.append
757 iadd = ignored.append
742 radd = removed.append
758 radd = removed.append
743 dadd = deleted.append
759 dadd = deleted.append
744 cadd = clean.append
760 cadd = clean.append
745 mexact = match.exact
761 mexact = match.exact
746 dirignore = self._dirignore
762 dirignore = self._dirignore
747 checkexec = self._checkexec
763 checkexec = self._checkexec
748 checklink = self._checklink
764 checklink = self._checklink
749 copymap = self._copymap
765 copymap = self._copymap
750 lastnormaltime = self._lastnormaltime
766 lastnormaltime = self._lastnormaltime
751
767
752 lnkkind = stat.S_IFLNK
768 lnkkind = stat.S_IFLNK
753
769
754 for fn, st in self.walk(match, subrepos, listunknown,
770 for fn, st in self.walk(match, subrepos, listunknown,
755 listignored).iteritems():
771 listignored).iteritems():
756 if fn not in dmap:
772 if fn not in dmap:
757 if (listignored or mexact(fn)) and dirignore(fn):
773 if (listignored or mexact(fn)) and dirignore(fn):
758 if listignored:
774 if listignored:
759 iadd(fn)
775 iadd(fn)
760 elif listunknown:
776 elif listunknown:
761 uadd(fn)
777 uadd(fn)
762 continue
778 continue
763
779
764 state, mode, size, time = dmap[fn]
780 state, mode, size, time = dmap[fn]
765
781
766 if not st and state in "nma":
782 if not st and state in "nma":
767 dadd(fn)
783 dadd(fn)
768 elif state == 'n':
784 elif state == 'n':
769 # The "mode & lnkkind != lnkkind or self._checklink"
785 # The "mode & lnkkind != lnkkind or self._checklink"
770 # lines are an expansion of "islink => checklink"
786 # lines are an expansion of "islink => checklink"
771 # where islink means "is this a link?" and checklink
787 # where islink means "is this a link?" and checklink
772 # means "can we check links?".
788 # means "can we check links?".
773 mtime = int(st.st_mtime)
789 mtime = int(st.st_mtime)
774 if (size >= 0 and
790 if (size >= 0 and
775 ((size != st.st_size and size != st.st_size & _rangemask)
791 ((size != st.st_size and size != st.st_size & _rangemask)
776 or ((mode ^ st.st_mode) & 0100 and checkexec))
792 or ((mode ^ st.st_mode) & 0100 and checkexec))
777 and (mode & lnkkind != lnkkind or checklink)
793 and (mode & lnkkind != lnkkind or checklink)
778 or size == -2 # other parent
794 or size == -2 # other parent
779 or fn in copymap):
795 or fn in copymap):
780 madd(fn)
796 madd(fn)
781 elif ((time != mtime and time != mtime & _rangemask)
797 elif ((time != mtime and time != mtime & _rangemask)
782 and (mode & lnkkind != lnkkind or checklink)):
798 and (mode & lnkkind != lnkkind or checklink)):
783 ladd(fn)
799 ladd(fn)
784 elif mtime == lastnormaltime:
800 elif mtime == lastnormaltime:
785 # fn may have been changed in the same timeslot without
801 # fn may have been changed in the same timeslot without
786 # changing its size. This can happen if we quickly do
802 # changing its size. This can happen if we quickly do
787 # multiple commits in a single transaction.
803 # multiple commits in a single transaction.
788 # Force lookup, so we don't miss such a racy file change.
804 # Force lookup, so we don't miss such a racy file change.
789 ladd(fn)
805 ladd(fn)
790 elif listclean:
806 elif listclean:
791 cadd(fn)
807 cadd(fn)
792 elif state == 'm':
808 elif state == 'm':
793 madd(fn)
809 madd(fn)
794 elif state == 'a':
810 elif state == 'a':
795 aadd(fn)
811 aadd(fn)
796 elif state == 'r':
812 elif state == 'r':
797 radd(fn)
813 radd(fn)
798
814
799 return (lookup, modified, added, removed, deleted, unknown, ignored,
815 return (lookup, modified, added, removed, deleted, unknown, ignored,
800 clean)
816 clean)
@@ -1,708 +1,716 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, hex, bin
8 from node import nullid, nullrev, hex, bin
9 from i18n import _
9 from i18n import _
10 import error, util, filemerge, copies, subrepo, worker
10 import error, util, filemerge, copies, subrepo, worker
11 import errno, os, shutil
11 import errno, os, shutil
12
12
13 class mergestate(object):
13 class mergestate(object):
14 '''track 3-way merge state of individual files'''
14 '''track 3-way merge state of individual files'''
15 def __init__(self, repo):
15 def __init__(self, repo):
16 self._repo = repo
16 self._repo = repo
17 self._dirty = False
17 self._dirty = False
18 self._read()
18 self._read()
19 def reset(self, node=None):
19 def reset(self, node=None):
20 self._state = {}
20 self._state = {}
21 if node:
21 if node:
22 self._local = node
22 self._local = node
23 shutil.rmtree(self._repo.join("merge"), True)
23 shutil.rmtree(self._repo.join("merge"), True)
24 self._dirty = False
24 self._dirty = False
25 def _read(self):
25 def _read(self):
26 self._state = {}
26 self._state = {}
27 try:
27 try:
28 f = self._repo.opener("merge/state")
28 f = self._repo.opener("merge/state")
29 for i, l in enumerate(f):
29 for i, l in enumerate(f):
30 if i == 0:
30 if i == 0:
31 self._local = bin(l[:-1])
31 self._local = bin(l[:-1])
32 else:
32 else:
33 bits = l[:-1].split("\0")
33 bits = l[:-1].split("\0")
34 self._state[bits[0]] = bits[1:]
34 self._state[bits[0]] = bits[1:]
35 f.close()
35 f.close()
36 except IOError, err:
36 except IOError, err:
37 if err.errno != errno.ENOENT:
37 if err.errno != errno.ENOENT:
38 raise
38 raise
39 self._dirty = False
39 self._dirty = False
40 def commit(self):
40 def commit(self):
41 if self._dirty:
41 if self._dirty:
42 f = self._repo.opener("merge/state", "w")
42 f = self._repo.opener("merge/state", "w")
43 f.write(hex(self._local) + "\n")
43 f.write(hex(self._local) + "\n")
44 for d, v in self._state.iteritems():
44 for d, v in self._state.iteritems():
45 f.write("\0".join([d] + v) + "\n")
45 f.write("\0".join([d] + v) + "\n")
46 f.close()
46 f.close()
47 self._dirty = False
47 self._dirty = False
48 def add(self, fcl, fco, fca, fd):
48 def add(self, fcl, fco, fca, fd):
49 hash = util.sha1(fcl.path()).hexdigest()
49 hash = util.sha1(fcl.path()).hexdigest()
50 self._repo.opener.write("merge/" + hash, fcl.data())
50 self._repo.opener.write("merge/" + hash, fcl.data())
51 self._state[fd] = ['u', hash, fcl.path(), fca.path(),
51 self._state[fd] = ['u', hash, fcl.path(), fca.path(),
52 hex(fca.filenode()), fco.path(), fcl.flags()]
52 hex(fca.filenode()), fco.path(), fcl.flags()]
53 self._dirty = True
53 self._dirty = True
54 def __contains__(self, dfile):
54 def __contains__(self, dfile):
55 return dfile in self._state
55 return dfile in self._state
56 def __getitem__(self, dfile):
56 def __getitem__(self, dfile):
57 return self._state[dfile][0]
57 return self._state[dfile][0]
58 def __iter__(self):
58 def __iter__(self):
59 l = self._state.keys()
59 l = self._state.keys()
60 l.sort()
60 l.sort()
61 for f in l:
61 for f in l:
62 yield f
62 yield f
63 def mark(self, dfile, state):
63 def mark(self, dfile, state):
64 self._state[dfile][0] = state
64 self._state[dfile][0] = state
65 self._dirty = True
65 self._dirty = True
66 def resolve(self, dfile, wctx, octx):
66 def resolve(self, dfile, wctx, octx):
67 if self[dfile] == 'r':
67 if self[dfile] == 'r':
68 return 0
68 return 0
69 state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
69 state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
70 fcd = wctx[dfile]
70 fcd = wctx[dfile]
71 fco = octx[ofile]
71 fco = octx[ofile]
72 fca = self._repo.filectx(afile, fileid=anode)
72 fca = self._repo.filectx(afile, fileid=anode)
73 # "premerge" x flags
73 # "premerge" x flags
74 flo = fco.flags()
74 flo = fco.flags()
75 fla = fca.flags()
75 fla = fca.flags()
76 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
76 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
77 if fca.node() == nullid:
77 if fca.node() == nullid:
78 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
78 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
79 afile)
79 afile)
80 elif flags == fla:
80 elif flags == fla:
81 flags = flo
81 flags = flo
82 # restore local
82 # restore local
83 f = self._repo.opener("merge/" + hash)
83 f = self._repo.opener("merge/" + hash)
84 self._repo.wwrite(dfile, f.read(), flags)
84 self._repo.wwrite(dfile, f.read(), flags)
85 f.close()
85 f.close()
86 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
86 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
87 if r is None:
87 if r is None:
88 # no real conflict
88 # no real conflict
89 del self._state[dfile]
89 del self._state[dfile]
90 elif not r:
90 elif not r:
91 self.mark(dfile, 'r')
91 self.mark(dfile, 'r')
92 return r
92 return r
93
93
94 def _checkunknownfile(repo, wctx, mctx, f):
94 def _checkunknownfile(repo, wctx, mctx, f):
95 return (not repo.dirstate._ignore(f)
95 return (not repo.dirstate._ignore(f)
96 and os.path.isfile(repo.wjoin(f))
96 and os.path.isfile(repo.wjoin(f))
97 and repo.dirstate.normalize(f) not in repo.dirstate
97 and repo.dirstate.normalize(f) not in repo.dirstate
98 and mctx[f].cmp(wctx[f]))
98 and mctx[f].cmp(wctx[f]))
99
99
100 def _checkunknown(repo, wctx, mctx):
100 def _checkunknown(repo, wctx, mctx):
101 "check for collisions between unknown files and files in mctx"
101 "check for collisions between unknown files and files in mctx"
102
102
103 error = False
103 error = False
104 for f in mctx:
104 for f in mctx:
105 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
105 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
106 error = True
106 error = True
107 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
107 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
108 if error:
108 if error:
109 raise util.Abort(_("untracked files in working directory differ "
109 raise util.Abort(_("untracked files in working directory differ "
110 "from files in requested revision"))
110 "from files in requested revision"))
111
111
112 def _remains(f, m, ma, workingctx=False):
112 def _remains(f, m, ma, workingctx=False):
113 """check whether specified file remains after merge.
113 """check whether specified file remains after merge.
114
114
115 It is assumed that specified file is not contained in the manifest
115 It is assumed that specified file is not contained in the manifest
116 of the other context.
116 of the other context.
117 """
117 """
118 if f in ma:
118 if f in ma:
119 n = m[f]
119 n = m[f]
120 if n != ma[f]:
120 if n != ma[f]:
121 return True # because it is changed locally
121 return True # because it is changed locally
122 # even though it doesn't remain, if "remote deleted" is
122 # even though it doesn't remain, if "remote deleted" is
123 # chosen in manifestmerge()
123 # chosen in manifestmerge()
124 elif workingctx and n[20:] == "a":
124 elif workingctx and n[20:] == "a":
125 return True # because it is added locally (linear merge specific)
125 return True # because it is added locally (linear merge specific)
126 else:
126 else:
127 return False # because it is removed remotely
127 return False # because it is removed remotely
128 else:
128 else:
129 return True # because it is added locally
129 return True # because it is added locally
130
130
131 def _checkcollision(mctx, extractxs):
131 def _checkcollision(mctx, extractxs):
132 "check for case folding collisions in the destination context"
132 "check for case folding collisions in the destination context"
133 folded = {}
133 folded = {}
134 for fn in mctx:
134 for fn in mctx:
135 fold = util.normcase(fn)
135 fold = util.normcase(fn)
136 if fold in folded:
136 if fold in folded:
137 raise util.Abort(_("case-folding collision between %s and %s")
137 raise util.Abort(_("case-folding collision between %s and %s")
138 % (fn, folded[fold]))
138 % (fn, folded[fold]))
139 folded[fold] = fn
139 folded[fold] = fn
140
140
141 if extractxs:
141 if extractxs:
142 wctx, actx = extractxs
142 wctx, actx = extractxs
143 # class to delay looking up copy mapping
143 # class to delay looking up copy mapping
144 class pathcopies(object):
144 class pathcopies(object):
145 @util.propertycache
145 @util.propertycache
146 def map(self):
146 def map(self):
147 # {dst@mctx: src@wctx} copy mapping
147 # {dst@mctx: src@wctx} copy mapping
148 return copies.pathcopies(wctx, mctx)
148 return copies.pathcopies(wctx, mctx)
149 pc = pathcopies()
149 pc = pathcopies()
150
150
151 for fn in wctx:
151 for fn in wctx:
152 fold = util.normcase(fn)
152 fold = util.normcase(fn)
153 mfn = folded.get(fold, None)
153 mfn = folded.get(fold, None)
154 if (mfn and mfn != fn and pc.map.get(mfn) != fn and
154 if (mfn and mfn != fn and pc.map.get(mfn) != fn and
155 _remains(fn, wctx.manifest(), actx.manifest(), True) and
155 _remains(fn, wctx.manifest(), actx.manifest(), True) and
156 _remains(mfn, mctx.manifest(), actx.manifest())):
156 _remains(mfn, mctx.manifest(), actx.manifest())):
157 raise util.Abort(_("case-folding collision between %s and %s")
157 raise util.Abort(_("case-folding collision between %s and %s")
158 % (mfn, fn))
158 % (mfn, fn))
159
159
160 def _forgetremoved(wctx, mctx, branchmerge):
160 def _forgetremoved(wctx, mctx, branchmerge):
161 """
161 """
162 Forget removed files
162 Forget removed files
163
163
164 If we're jumping between revisions (as opposed to merging), and if
164 If we're jumping between revisions (as opposed to merging), and if
165 neither the working directory nor the target rev has the file,
165 neither the working directory nor the target rev has the file,
166 then we need to remove it from the dirstate, to prevent the
166 then we need to remove it from the dirstate, to prevent the
167 dirstate from listing the file when it is no longer in the
167 dirstate from listing the file when it is no longer in the
168 manifest.
168 manifest.
169
169
170 If we're merging, and the other revision has removed a file
170 If we're merging, and the other revision has removed a file
171 that is not present in the working directory, we need to mark it
171 that is not present in the working directory, we need to mark it
172 as removed.
172 as removed.
173 """
173 """
174
174
175 actions = []
175 actions = []
176 state = branchmerge and 'r' or 'f'
176 state = branchmerge and 'r' or 'f'
177 for f in wctx.deleted():
177 for f in wctx.deleted():
178 if f not in mctx:
178 if f not in mctx:
179 actions.append((f, state, None, "forget deleted"))
179 actions.append((f, state, None, "forget deleted"))
180
180
181 if not branchmerge:
181 if not branchmerge:
182 for f in wctx.removed():
182 for f in wctx.removed():
183 if f not in mctx:
183 if f not in mctx:
184 actions.append((f, "f", None, "forget removed"))
184 actions.append((f, "f", None, "forget removed"))
185
185
186 return actions
186 return actions
187
187
188 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial):
188 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial):
189 """
189 """
190 Merge p1 and p2 with ancestor pa and generate merge action list
190 Merge p1 and p2 with ancestor pa and generate merge action list
191
191
192 branchmerge and force are as passed in to update
192 branchmerge and force are as passed in to update
193 partial = function to filter file lists
193 partial = function to filter file lists
194 """
194 """
195
195
196 overwrite = force and not branchmerge
196 overwrite = force and not branchmerge
197 actions, copy, movewithdir = [], {}, {}
197 actions, copy, movewithdir = [], {}, {}
198
198
199 followcopies = False
199 if overwrite:
200 if overwrite:
200 pa = wctx
201 pa = wctx
201 elif pa == p2: # backwards
202 elif pa == p2: # backwards
202 pa = wctx.p1()
203 pa = wctx.p1()
203 elif not branchmerge and not wctx.dirty(missing=True):
204 elif not branchmerge and not wctx.dirty(missing=True):
204 pass
205 pass
205 elif pa and repo.ui.configbool("merge", "followcopies", True):
206 elif pa and repo.ui.configbool("merge", "followcopies", True):
207 followcopies = True
208
209 # manifests fetched in order are going to be faster, so prime the caches
210 [x.manifest() for x in
211 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
212
213 if followcopies:
206 ret = copies.mergecopies(repo, wctx, p2, pa)
214 ret = copies.mergecopies(repo, wctx, p2, pa)
207 copy, movewithdir, diverge, renamedelete = ret
215 copy, movewithdir, diverge, renamedelete = ret
208 for of, fl in diverge.iteritems():
216 for of, fl in diverge.iteritems():
209 actions.append((of, "dr", (fl,), "divergent renames"))
217 actions.append((of, "dr", (fl,), "divergent renames"))
210 for of, fl in renamedelete.iteritems():
218 for of, fl in renamedelete.iteritems():
211 actions.append((of, "rd", (fl,), "rename and delete"))
219 actions.append((of, "rd", (fl,), "rename and delete"))
212
220
213 repo.ui.note(_("resolving manifests\n"))
221 repo.ui.note(_("resolving manifests\n"))
214 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
222 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
215 % (bool(branchmerge), bool(force), bool(partial)))
223 % (bool(branchmerge), bool(force), bool(partial)))
216 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
224 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
217
225
218 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
226 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
219 copied = set(copy.values())
227 copied = set(copy.values())
220 copied.update(movewithdir.values())
228 copied.update(movewithdir.values())
221
229
222 if '.hgsubstate' in m1:
230 if '.hgsubstate' in m1:
223 # check whether sub state is modified
231 # check whether sub state is modified
224 for s in sorted(wctx.substate):
232 for s in sorted(wctx.substate):
225 if wctx.sub(s).dirty():
233 if wctx.sub(s).dirty():
226 m1['.hgsubstate'] += "+"
234 m1['.hgsubstate'] += "+"
227 break
235 break
228
236
229 aborts, prompts = [], []
237 aborts, prompts = [], []
230 # Compare manifests
238 # Compare manifests
231 for f, n in m1.iteritems():
239 for f, n in m1.iteritems():
232 if partial and not partial(f):
240 if partial and not partial(f):
233 continue
241 continue
234 if f in m2:
242 if f in m2:
235 n2 = m2[f]
243 n2 = m2[f]
236 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
244 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
237 nol = 'l' not in fl1 + fl2 + fla
245 nol = 'l' not in fl1 + fl2 + fla
238 a = ma.get(f, nullid)
246 a = ma.get(f, nullid)
239 if n == n2 and fl1 == fl2:
247 if n == n2 and fl1 == fl2:
240 pass # same - keep local
248 pass # same - keep local
241 elif n2 == a and fl2 == fla:
249 elif n2 == a and fl2 == fla:
242 pass # remote unchanged - keep local
250 pass # remote unchanged - keep local
243 elif n == a and fl1 == fla: # local unchanged - use remote
251 elif n == a and fl1 == fla: # local unchanged - use remote
244 if n == n2: # optimization: keep local content
252 if n == n2: # optimization: keep local content
245 actions.append((f, "e", (fl2,), "update permissions"))
253 actions.append((f, "e", (fl2,), "update permissions"))
246 else:
254 else:
247 actions.append((f, "g", (fl2,), "remote is newer"))
255 actions.append((f, "g", (fl2,), "remote is newer"))
248 elif nol and n2 == a: # remote only changed 'x'
256 elif nol and n2 == a: # remote only changed 'x'
249 actions.append((f, "e", (fl2,), "update permissions"))
257 actions.append((f, "e", (fl2,), "update permissions"))
250 elif nol and n == a: # local only changed 'x'
258 elif nol and n == a: # local only changed 'x'
251 actions.append((f, "g", (fl1,), "remote is newer"))
259 actions.append((f, "g", (fl1,), "remote is newer"))
252 else: # both changed something
260 else: # both changed something
253 actions.append((f, "m", (f, f, False), "versions differ"))
261 actions.append((f, "m", (f, f, False), "versions differ"))
254 elif f in copied: # files we'll deal with on m2 side
262 elif f in copied: # files we'll deal with on m2 side
255 pass
263 pass
256 elif f in movewithdir: # directory rename
264 elif f in movewithdir: # directory rename
257 f2 = movewithdir[f]
265 f2 = movewithdir[f]
258 actions.append((f, "d", (None, f2, m1.flags(f)),
266 actions.append((f, "d", (None, f2, m1.flags(f)),
259 "remote renamed directory to " + f2))
267 "remote renamed directory to " + f2))
260 elif f in copy:
268 elif f in copy:
261 f2 = copy[f]
269 f2 = copy[f]
262 actions.append((f, "m", (f2, f, False),
270 actions.append((f, "m", (f2, f, False),
263 "local copied/moved to " + f2))
271 "local copied/moved to " + f2))
264 elif f in ma: # clean, a different, no remote
272 elif f in ma: # clean, a different, no remote
265 if n != ma[f]:
273 if n != ma[f]:
266 prompts.append((f, "cd")) # prompt changed/deleted
274 prompts.append((f, "cd")) # prompt changed/deleted
267 elif n[20:] == "a": # added, no remote
275 elif n[20:] == "a": # added, no remote
268 actions.append((f, "f", None, "remote deleted"))
276 actions.append((f, "f", None, "remote deleted"))
269 else:
277 else:
270 actions.append((f, "r", None, "other deleted"))
278 actions.append((f, "r", None, "other deleted"))
271
279
272 for f, n in m2.iteritems():
280 for f, n in m2.iteritems():
273 if partial and not partial(f):
281 if partial and not partial(f):
274 continue
282 continue
275 if f in m1 or f in copied: # files already visited
283 if f in m1 or f in copied: # files already visited
276 continue
284 continue
277 if f in movewithdir:
285 if f in movewithdir:
278 f2 = movewithdir[f]
286 f2 = movewithdir[f]
279 actions.append((None, "d", (f, f2, m2.flags(f)),
287 actions.append((None, "d", (f, f2, m2.flags(f)),
280 "local renamed directory to " + f2))
288 "local renamed directory to " + f2))
281 elif f in copy:
289 elif f in copy:
282 f2 = copy[f]
290 f2 = copy[f]
283 if f2 in m2:
291 if f2 in m2:
284 actions.append((f2, "m", (f, f, False),
292 actions.append((f2, "m", (f, f, False),
285 "remote copied to " + f))
293 "remote copied to " + f))
286 else:
294 else:
287 actions.append((f2, "m", (f, f, True),
295 actions.append((f2, "m", (f, f, True),
288 "remote moved to " + f))
296 "remote moved to " + f))
289 elif f not in ma:
297 elif f not in ma:
290 # local unknown, remote created: the logic is described by the
298 # local unknown, remote created: the logic is described by the
291 # following table:
299 # following table:
292 #
300 #
293 # force branchmerge different | action
301 # force branchmerge different | action
294 # n * n | get
302 # n * n | get
295 # n * y | abort
303 # n * y | abort
296 # y n * | get
304 # y n * | get
297 # y y n | get
305 # y y n | get
298 # y y y | merge
306 # y y y | merge
299 #
307 #
300 # Checking whether the files are different is expensive, so we
308 # Checking whether the files are different is expensive, so we
301 # don't do that when we can avoid it.
309 # don't do that when we can avoid it.
302 if force and not branchmerge:
310 if force and not branchmerge:
303 actions.append((f, "g", (m2.flags(f),), "remote created"))
311 actions.append((f, "g", (m2.flags(f),), "remote created"))
304 else:
312 else:
305 different = _checkunknownfile(repo, wctx, p2, f)
313 different = _checkunknownfile(repo, wctx, p2, f)
306 if force and branchmerge and different:
314 if force and branchmerge and different:
307 actions.append((f, "m", (f, f, False),
315 actions.append((f, "m", (f, f, False),
308 "remote differs from untracked local"))
316 "remote differs from untracked local"))
309 elif not force and different:
317 elif not force and different:
310 aborts.append((f, "ud"))
318 aborts.append((f, "ud"))
311 else:
319 else:
312 actions.append((f, "g", (m2.flags(f),), "remote created"))
320 actions.append((f, "g", (m2.flags(f),), "remote created"))
313 elif n != ma[f]:
321 elif n != ma[f]:
314 prompts.append((f, "dc")) # prompt deleted/changed
322 prompts.append((f, "dc")) # prompt deleted/changed
315
323
316 for f, m in sorted(aborts):
324 for f, m in sorted(aborts):
317 if m == "ud":
325 if m == "ud":
318 repo.ui.warn(_("%s: untracked file differs\n") % f)
326 repo.ui.warn(_("%s: untracked file differs\n") % f)
319 else: assert False, m
327 else: assert False, m
320 if aborts:
328 if aborts:
321 raise util.Abort(_("untracked files in working directory differ "
329 raise util.Abort(_("untracked files in working directory differ "
322 "from files in requested revision"))
330 "from files in requested revision"))
323
331
324 for f, m in sorted(prompts):
332 for f, m in sorted(prompts):
325 if m == "cd":
333 if m == "cd":
326 if repo.ui.promptchoice(
334 if repo.ui.promptchoice(
327 _("local changed %s which remote deleted\n"
335 _("local changed %s which remote deleted\n"
328 "use (c)hanged version or (d)elete?") % f,
336 "use (c)hanged version or (d)elete?") % f,
329 (_("&Changed"), _("&Delete")), 0):
337 (_("&Changed"), _("&Delete")), 0):
330 actions.append((f, "r", None, "prompt delete"))
338 actions.append((f, "r", None, "prompt delete"))
331 else:
339 else:
332 actions.append((f, "a", None, "prompt keep"))
340 actions.append((f, "a", None, "prompt keep"))
333 elif m == "dc":
341 elif m == "dc":
334 if repo.ui.promptchoice(
342 if repo.ui.promptchoice(
335 _("remote changed %s which local deleted\n"
343 _("remote changed %s which local deleted\n"
336 "use (c)hanged version or leave (d)eleted?") % f,
344 "use (c)hanged version or leave (d)eleted?") % f,
337 (_("&Changed"), _("&Deleted")), 0) == 0:
345 (_("&Changed"), _("&Deleted")), 0) == 0:
338 actions.append((f, "g", (m2.flags(f),), "prompt recreating"))
346 actions.append((f, "g", (m2.flags(f),), "prompt recreating"))
339 else: assert False, m
347 else: assert False, m
340 return actions
348 return actions
341
349
342 def actionkey(a):
350 def actionkey(a):
343 return a[1] == "r" and -1 or 0, a
351 return a[1] == "r" and -1 or 0, a
344
352
345 def getremove(repo, mctx, overwrite, args):
353 def getremove(repo, mctx, overwrite, args):
346 """apply usually-non-interactive updates to the working directory
354 """apply usually-non-interactive updates to the working directory
347
355
348 mctx is the context to be merged into the working copy
356 mctx is the context to be merged into the working copy
349
357
350 yields tuples for progress updates
358 yields tuples for progress updates
351 """
359 """
352 verbose = repo.ui.verbose
360 verbose = repo.ui.verbose
353 unlink = util.unlinkpath
361 unlink = util.unlinkpath
354 wjoin = repo.wjoin
362 wjoin = repo.wjoin
355 fctx = mctx.filectx
363 fctx = mctx.filectx
356 wwrite = repo.wwrite
364 wwrite = repo.wwrite
357 audit = repo.wopener.audit
365 audit = repo.wopener.audit
358 i = 0
366 i = 0
359 for arg in args:
367 for arg in args:
360 f = arg[0]
368 f = arg[0]
361 if arg[1] == 'r':
369 if arg[1] == 'r':
362 if verbose:
370 if verbose:
363 repo.ui.note(_("removing %s\n") % f)
371 repo.ui.note(_("removing %s\n") % f)
364 audit(f)
372 audit(f)
365 try:
373 try:
366 unlink(wjoin(f), ignoremissing=True)
374 unlink(wjoin(f), ignoremissing=True)
367 except OSError, inst:
375 except OSError, inst:
368 repo.ui.warn(_("update failed to remove %s: %s!\n") %
376 repo.ui.warn(_("update failed to remove %s: %s!\n") %
369 (f, inst.strerror))
377 (f, inst.strerror))
370 else:
378 else:
371 if verbose:
379 if verbose:
372 repo.ui.note(_("getting %s\n") % f)
380 repo.ui.note(_("getting %s\n") % f)
373 wwrite(f, fctx(f).data(), arg[2][0])
381 wwrite(f, fctx(f).data(), arg[2][0])
374 if i == 100:
382 if i == 100:
375 yield i, f
383 yield i, f
376 i = 0
384 i = 0
377 i += 1
385 i += 1
378 if i > 0:
386 if i > 0:
379 yield i, f
387 yield i, f
380
388
381 def applyupdates(repo, actions, wctx, mctx, actx, overwrite):
389 def applyupdates(repo, actions, wctx, mctx, actx, overwrite):
382 """apply the merge action list to the working directory
390 """apply the merge action list to the working directory
383
391
384 wctx is the working copy context
392 wctx is the working copy context
385 mctx is the context to be merged into the working copy
393 mctx is the context to be merged into the working copy
386 actx is the context of the common ancestor
394 actx is the context of the common ancestor
387
395
388 Return a tuple of counts (updated, merged, removed, unresolved) that
396 Return a tuple of counts (updated, merged, removed, unresolved) that
389 describes how many files were affected by the update.
397 describes how many files were affected by the update.
390 """
398 """
391
399
392 updated, merged, removed, unresolved = 0, 0, 0, 0
400 updated, merged, removed, unresolved = 0, 0, 0, 0
393 ms = mergestate(repo)
401 ms = mergestate(repo)
394 ms.reset(wctx.p1().node())
402 ms.reset(wctx.p1().node())
395 moves = []
403 moves = []
396 actions.sort(key=actionkey)
404 actions.sort(key=actionkey)
397
405
398 # prescan for merges
406 # prescan for merges
399 for a in actions:
407 for a in actions:
400 f, m, args, msg = a
408 f, m, args, msg = a
401 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
409 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
402 if m == "m": # merge
410 if m == "m": # merge
403 f2, fd, move = args
411 f2, fd, move = args
404 if fd == '.hgsubstate': # merged internally
412 if fd == '.hgsubstate': # merged internally
405 continue
413 continue
406 repo.ui.debug(" preserving %s for resolve of %s\n" % (f, fd))
414 repo.ui.debug(" preserving %s for resolve of %s\n" % (f, fd))
407 fcl = wctx[f]
415 fcl = wctx[f]
408 fco = mctx[f2]
416 fco = mctx[f2]
409 if mctx == actx: # backwards, use working dir parent as ancestor
417 if mctx == actx: # backwards, use working dir parent as ancestor
410 if fcl.parents():
418 if fcl.parents():
411 fca = fcl.p1()
419 fca = fcl.p1()
412 else:
420 else:
413 fca = repo.filectx(f, fileid=nullrev)
421 fca = repo.filectx(f, fileid=nullrev)
414 else:
422 else:
415 fca = fcl.ancestor(fco, actx)
423 fca = fcl.ancestor(fco, actx)
416 if not fca:
424 if not fca:
417 fca = repo.filectx(f, fileid=nullrev)
425 fca = repo.filectx(f, fileid=nullrev)
418 ms.add(fcl, fco, fca, fd)
426 ms.add(fcl, fco, fca, fd)
419 if f != fd and move:
427 if f != fd and move:
420 moves.append(f)
428 moves.append(f)
421
429
422 audit = repo.wopener.audit
430 audit = repo.wopener.audit
423
431
424 # remove renamed files after safely stored
432 # remove renamed files after safely stored
425 for f in moves:
433 for f in moves:
426 if os.path.lexists(repo.wjoin(f)):
434 if os.path.lexists(repo.wjoin(f)):
427 repo.ui.debug("removing %s\n" % f)
435 repo.ui.debug("removing %s\n" % f)
428 audit(f)
436 audit(f)
429 util.unlinkpath(repo.wjoin(f))
437 util.unlinkpath(repo.wjoin(f))
430
438
431 numupdates = len(actions)
439 numupdates = len(actions)
432 workeractions = [a for a in actions if a[1] in 'gr']
440 workeractions = [a for a in actions if a[1] in 'gr']
433 updated = len([a for a in workeractions if a[1] == 'g'])
441 updated = len([a for a in workeractions if a[1] == 'g'])
434 removed = len([a for a in workeractions if a[1] == 'r'])
442 removed = len([a for a in workeractions if a[1] == 'r'])
435 actions = [a for a in actions if a[1] not in 'gr']
443 actions = [a for a in actions if a[1] not in 'gr']
436
444
437 hgsub = [a[1] for a in workeractions if a[0] == '.hgsubstate']
445 hgsub = [a[1] for a in workeractions if a[0] == '.hgsubstate']
438 if hgsub and hgsub[0] == 'r':
446 if hgsub and hgsub[0] == 'r':
439 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
447 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
440
448
441 z = 0
449 z = 0
442 prog = worker.worker(repo.ui, 0.001, getremove, (repo, mctx, overwrite),
450 prog = worker.worker(repo.ui, 0.001, getremove, (repo, mctx, overwrite),
443 workeractions)
451 workeractions)
444 for i, item in prog:
452 for i, item in prog:
445 z += i
453 z += i
446 repo.ui.progress(_('updating'), z, item=item, total=numupdates,
454 repo.ui.progress(_('updating'), z, item=item, total=numupdates,
447 unit=_('files'))
455 unit=_('files'))
448
456
449 if hgsub and hgsub[0] == 'g':
457 if hgsub and hgsub[0] == 'g':
450 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
458 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
451
459
452 _updating = _('updating')
460 _updating = _('updating')
453 _files = _('files')
461 _files = _('files')
454 progress = repo.ui.progress
462 progress = repo.ui.progress
455
463
456 for i, a in enumerate(actions):
464 for i, a in enumerate(actions):
457 f, m, args, msg = a
465 f, m, args, msg = a
458 progress(_updating, z + i + 1, item=f, total=numupdates, unit=_files)
466 progress(_updating, z + i + 1, item=f, total=numupdates, unit=_files)
459 if m == "m": # merge
467 if m == "m": # merge
460 if fd == '.hgsubstate': # subrepo states need updating
468 if fd == '.hgsubstate': # subrepo states need updating
461 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
469 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
462 overwrite)
470 overwrite)
463 continue
471 continue
464 f2, fd, move = args
472 f2, fd, move = args
465 audit(fd)
473 audit(fd)
466 r = ms.resolve(fd, wctx, mctx)
474 r = ms.resolve(fd, wctx, mctx)
467 if r is not None and r > 0:
475 if r is not None and r > 0:
468 unresolved += 1
476 unresolved += 1
469 else:
477 else:
470 if r is None:
478 if r is None:
471 updated += 1
479 updated += 1
472 else:
480 else:
473 merged += 1
481 merged += 1
474 elif m == "d": # directory rename
482 elif m == "d": # directory rename
475 f2, fd, flags = args
483 f2, fd, flags = args
476 if f:
484 if f:
477 repo.ui.note(_("moving %s to %s\n") % (f, fd))
485 repo.ui.note(_("moving %s to %s\n") % (f, fd))
478 audit(f)
486 audit(f)
479 repo.wwrite(fd, wctx.filectx(f).data(), flags)
487 repo.wwrite(fd, wctx.filectx(f).data(), flags)
480 util.unlinkpath(repo.wjoin(f))
488 util.unlinkpath(repo.wjoin(f))
481 if f2:
489 if f2:
482 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
490 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
483 repo.wwrite(fd, mctx.filectx(f2).data(), flags)
491 repo.wwrite(fd, mctx.filectx(f2).data(), flags)
484 updated += 1
492 updated += 1
485 elif m == "dr": # divergent renames
493 elif m == "dr": # divergent renames
486 fl, = args
494 fl, = args
487 repo.ui.warn(_("note: possible conflict - %s was renamed "
495 repo.ui.warn(_("note: possible conflict - %s was renamed "
488 "multiple times to:\n") % f)
496 "multiple times to:\n") % f)
489 for nf in fl:
497 for nf in fl:
490 repo.ui.warn(" %s\n" % nf)
498 repo.ui.warn(" %s\n" % nf)
491 elif m == "rd": # rename and delete
499 elif m == "rd": # rename and delete
492 fl, = args
500 fl, = args
493 repo.ui.warn(_("note: possible conflict - %s was deleted "
501 repo.ui.warn(_("note: possible conflict - %s was deleted "
494 "and renamed to:\n") % f)
502 "and renamed to:\n") % f)
495 for nf in fl:
503 for nf in fl:
496 repo.ui.warn(" %s\n" % nf)
504 repo.ui.warn(" %s\n" % nf)
497 elif m == "e": # exec
505 elif m == "e": # exec
498 flags, = args
506 flags, = args
499 audit(f)
507 audit(f)
500 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
508 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
501 updated += 1
509 updated += 1
502 ms.commit()
510 ms.commit()
503 progress(_updating, None, total=numupdates, unit=_files)
511 progress(_updating, None, total=numupdates, unit=_files)
504
512
505 return updated, merged, removed, unresolved
513 return updated, merged, removed, unresolved
506
514
507 def calculateupdates(repo, tctx, mctx, ancestor, branchmerge, force, partial):
515 def calculateupdates(repo, tctx, mctx, ancestor, branchmerge, force, partial):
508 "Calculate the actions needed to merge mctx into tctx"
516 "Calculate the actions needed to merge mctx into tctx"
509 actions = []
517 actions = []
510 folding = not util.checkcase(repo.path)
518 folding = not util.checkcase(repo.path)
511 if folding:
519 if folding:
512 # collision check is not needed for clean update
520 # collision check is not needed for clean update
513 if (not branchmerge and
521 if (not branchmerge and
514 (force or not tctx.dirty(missing=True, branch=False))):
522 (force or not tctx.dirty(missing=True, branch=False))):
515 _checkcollision(mctx, None)
523 _checkcollision(mctx, None)
516 else:
524 else:
517 _checkcollision(mctx, (tctx, ancestor))
525 _checkcollision(mctx, (tctx, ancestor))
518 if tctx.rev() is None:
519 actions += _forgetremoved(tctx, mctx, branchmerge)
520 actions += manifestmerge(repo, tctx, mctx,
526 actions += manifestmerge(repo, tctx, mctx,
521 ancestor,
527 ancestor,
522 branchmerge, force,
528 branchmerge, force,
523 partial)
529 partial)
530 if tctx.rev() is None:
531 actions += _forgetremoved(tctx, mctx, branchmerge)
524 return actions
532 return actions
525
533
526 def recordupdates(repo, actions, branchmerge):
534 def recordupdates(repo, actions, branchmerge):
527 "record merge actions to the dirstate"
535 "record merge actions to the dirstate"
528
536
529 for a in actions:
537 for a in actions:
530 f, m, args, msg = a
538 f, m, args, msg = a
531 if m == "r": # remove
539 if m == "r": # remove
532 if branchmerge:
540 if branchmerge:
533 repo.dirstate.remove(f)
541 repo.dirstate.remove(f)
534 else:
542 else:
535 repo.dirstate.drop(f)
543 repo.dirstate.drop(f)
536 elif m == "a": # re-add
544 elif m == "a": # re-add
537 if not branchmerge:
545 if not branchmerge:
538 repo.dirstate.add(f)
546 repo.dirstate.add(f)
539 elif m == "f": # forget
547 elif m == "f": # forget
540 repo.dirstate.drop(f)
548 repo.dirstate.drop(f)
541 elif m == "e": # exec change
549 elif m == "e": # exec change
542 repo.dirstate.normallookup(f)
550 repo.dirstate.normallookup(f)
543 elif m == "g": # get
551 elif m == "g": # get
544 if branchmerge:
552 if branchmerge:
545 repo.dirstate.otherparent(f)
553 repo.dirstate.otherparent(f)
546 else:
554 else:
547 repo.dirstate.normal(f)
555 repo.dirstate.normal(f)
548 elif m == "m": # merge
556 elif m == "m": # merge
549 f2, fd, move = args
557 f2, fd, move = args
550 if branchmerge:
558 if branchmerge:
551 # We've done a branch merge, mark this file as merged
559 # We've done a branch merge, mark this file as merged
552 # so that we properly record the merger later
560 # so that we properly record the merger later
553 repo.dirstate.merge(fd)
561 repo.dirstate.merge(fd)
554 if f != f2: # copy/rename
562 if f != f2: # copy/rename
555 if move:
563 if move:
556 repo.dirstate.remove(f)
564 repo.dirstate.remove(f)
557 if f != fd:
565 if f != fd:
558 repo.dirstate.copy(f, fd)
566 repo.dirstate.copy(f, fd)
559 else:
567 else:
560 repo.dirstate.copy(f2, fd)
568 repo.dirstate.copy(f2, fd)
561 else:
569 else:
562 # We've update-merged a locally modified file, so
570 # We've update-merged a locally modified file, so
563 # we set the dirstate to emulate a normal checkout
571 # we set the dirstate to emulate a normal checkout
564 # of that file some time in the past. Thus our
572 # of that file some time in the past. Thus our
565 # merge will appear as a normal local file
573 # merge will appear as a normal local file
566 # modification.
574 # modification.
567 if f2 == fd: # file not locally copied/moved
575 if f2 == fd: # file not locally copied/moved
568 repo.dirstate.normallookup(fd)
576 repo.dirstate.normallookup(fd)
569 if move:
577 if move:
570 repo.dirstate.drop(f)
578 repo.dirstate.drop(f)
571 elif m == "d": # directory rename
579 elif m == "d": # directory rename
572 f2, fd, flag = args
580 f2, fd, flag = args
573 if not f2 and f not in repo.dirstate:
581 if not f2 and f not in repo.dirstate:
574 # untracked file moved
582 # untracked file moved
575 continue
583 continue
576 if branchmerge:
584 if branchmerge:
577 repo.dirstate.add(fd)
585 repo.dirstate.add(fd)
578 if f:
586 if f:
579 repo.dirstate.remove(f)
587 repo.dirstate.remove(f)
580 repo.dirstate.copy(f, fd)
588 repo.dirstate.copy(f, fd)
581 if f2:
589 if f2:
582 repo.dirstate.copy(f2, fd)
590 repo.dirstate.copy(f2, fd)
583 else:
591 else:
584 repo.dirstate.normal(fd)
592 repo.dirstate.normal(fd)
585 if f:
593 if f:
586 repo.dirstate.drop(f)
594 repo.dirstate.drop(f)
587
595
588 def update(repo, node, branchmerge, force, partial, ancestor=None,
596 def update(repo, node, branchmerge, force, partial, ancestor=None,
589 mergeancestor=False):
597 mergeancestor=False):
590 """
598 """
591 Perform a merge between the working directory and the given node
599 Perform a merge between the working directory and the given node
592
600
593 node = the node to update to, or None if unspecified
601 node = the node to update to, or None if unspecified
594 branchmerge = whether to merge between branches
602 branchmerge = whether to merge between branches
595 force = whether to force branch merging or file overwriting
603 force = whether to force branch merging or file overwriting
596 partial = a function to filter file lists (dirstate not updated)
604 partial = a function to filter file lists (dirstate not updated)
597 mergeancestor = if false, merging with an ancestor (fast-forward)
605 mergeancestor = if false, merging with an ancestor (fast-forward)
598 is only allowed between different named branches. This flag
606 is only allowed between different named branches. This flag
599 is used by rebase extension as a temporary fix and should be
607 is used by rebase extension as a temporary fix and should be
600 avoided in general.
608 avoided in general.
601
609
602 The table below shows all the behaviors of the update command
610 The table below shows all the behaviors of the update command
603 given the -c and -C or no options, whether the working directory
611 given the -c and -C or no options, whether the working directory
604 is dirty, whether a revision is specified, and the relationship of
612 is dirty, whether a revision is specified, and the relationship of
605 the parent rev to the target rev (linear, on the same named
613 the parent rev to the target rev (linear, on the same named
606 branch, or on another named branch).
614 branch, or on another named branch).
607
615
608 This logic is tested by test-update-branches.t.
616 This logic is tested by test-update-branches.t.
609
617
610 -c -C dirty rev | linear same cross
618 -c -C dirty rev | linear same cross
611 n n n n | ok (1) x
619 n n n n | ok (1) x
612 n n n y | ok ok ok
620 n n n y | ok ok ok
613 n n y * | merge (2) (2)
621 n n y * | merge (2) (2)
614 n y * * | --- discard ---
622 n y * * | --- discard ---
615 y n y * | --- (3) ---
623 y n y * | --- (3) ---
616 y n n * | --- ok ---
624 y n n * | --- ok ---
617 y y * * | --- (4) ---
625 y y * * | --- (4) ---
618
626
619 x = can't happen
627 x = can't happen
620 * = don't-care
628 * = don't-care
621 1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
629 1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
622 2 = abort: crosses branches (use 'hg merge' to merge or
630 2 = abort: crosses branches (use 'hg merge' to merge or
623 use 'hg update -C' to discard changes)
631 use 'hg update -C' to discard changes)
624 3 = abort: uncommitted local changes
632 3 = abort: uncommitted local changes
625 4 = incompatible options (checked in commands.py)
633 4 = incompatible options (checked in commands.py)
626
634
627 Return the same tuple as applyupdates().
635 Return the same tuple as applyupdates().
628 """
636 """
629
637
630 onode = node
638 onode = node
631 wlock = repo.wlock()
639 wlock = repo.wlock()
632 try:
640 try:
633 wc = repo[None]
641 wc = repo[None]
634 if node is None:
642 if node is None:
635 # tip of current branch
643 # tip of current branch
636 try:
644 try:
637 node = repo.branchtip(wc.branch())
645 node = repo.branchtip(wc.branch())
638 except error.RepoLookupError:
646 except error.RepoLookupError:
639 if wc.branch() == "default": # no default branch!
647 if wc.branch() == "default": # no default branch!
640 node = repo.lookup("tip") # update to tip
648 node = repo.lookup("tip") # update to tip
641 else:
649 else:
642 raise util.Abort(_("branch %s not found") % wc.branch())
650 raise util.Abort(_("branch %s not found") % wc.branch())
643 overwrite = force and not branchmerge
651 overwrite = force and not branchmerge
644 pl = wc.parents()
652 pl = wc.parents()
645 p1, p2 = pl[0], repo[node]
653 p1, p2 = pl[0], repo[node]
646 if ancestor:
654 if ancestor:
647 pa = repo[ancestor]
655 pa = repo[ancestor]
648 else:
656 else:
649 pa = p1.ancestor(p2)
657 pa = p1.ancestor(p2)
650
658
651 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
659 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
652
660
653 ### check phase
661 ### check phase
654 if not overwrite and len(pl) > 1:
662 if not overwrite and len(pl) > 1:
655 raise util.Abort(_("outstanding uncommitted merges"))
663 raise util.Abort(_("outstanding uncommitted merges"))
656 if branchmerge:
664 if branchmerge:
657 if pa == p2:
665 if pa == p2:
658 raise util.Abort(_("merging with a working directory ancestor"
666 raise util.Abort(_("merging with a working directory ancestor"
659 " has no effect"))
667 " has no effect"))
660 elif pa == p1:
668 elif pa == p1:
661 if not mergeancestor and p1.branch() == p2.branch():
669 if not mergeancestor and p1.branch() == p2.branch():
662 raise util.Abort(_("nothing to merge"),
670 raise util.Abort(_("nothing to merge"),
663 hint=_("use 'hg update' "
671 hint=_("use 'hg update' "
664 "or check 'hg heads'"))
672 "or check 'hg heads'"))
665 if not force and (wc.files() or wc.deleted()):
673 if not force and (wc.files() or wc.deleted()):
666 raise util.Abort(_("outstanding uncommitted changes"),
674 raise util.Abort(_("outstanding uncommitted changes"),
667 hint=_("use 'hg status' to list changes"))
675 hint=_("use 'hg status' to list changes"))
668 for s in sorted(wc.substate):
676 for s in sorted(wc.substate):
669 if wc.sub(s).dirty():
677 if wc.sub(s).dirty():
670 raise util.Abort(_("outstanding uncommitted changes in "
678 raise util.Abort(_("outstanding uncommitted changes in "
671 "subrepository '%s'") % s)
679 "subrepository '%s'") % s)
672
680
673 elif not overwrite:
681 elif not overwrite:
674 if pa == p1 or pa == p2: # linear
682 if pa == p1 or pa == p2: # linear
675 pass # all good
683 pass # all good
676 elif wc.dirty(missing=True):
684 elif wc.dirty(missing=True):
677 raise util.Abort(_("crosses branches (merge branches or use"
685 raise util.Abort(_("crosses branches (merge branches or use"
678 " --clean to discard changes)"))
686 " --clean to discard changes)"))
679 elif onode is None:
687 elif onode is None:
680 raise util.Abort(_("crosses branches (merge branches or update"
688 raise util.Abort(_("crosses branches (merge branches or update"
681 " --check to force update)"))
689 " --check to force update)"))
682 else:
690 else:
683 # Allow jumping branches if clean and specific rev given
691 # Allow jumping branches if clean and specific rev given
684 pa = p1
692 pa = p1
685
693
686 ### calculate phase
694 ### calculate phase
687 actions = calculateupdates(repo, wc, p2, pa,
695 actions = calculateupdates(repo, wc, p2, pa,
688 branchmerge, force, partial)
696 branchmerge, force, partial)
689
697
690 ### apply phase
698 ### apply phase
691 if not branchmerge: # just jump to the new rev
699 if not branchmerge: # just jump to the new rev
692 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
700 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
693 if not partial:
701 if not partial:
694 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
702 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
695
703
696 stats = applyupdates(repo, actions, wc, p2, pa, overwrite)
704 stats = applyupdates(repo, actions, wc, p2, pa, overwrite)
697
705
698 if not partial:
706 if not partial:
699 repo.setparents(fp1, fp2)
707 repo.setparents(fp1, fp2)
700 recordupdates(repo, actions, branchmerge)
708 recordupdates(repo, actions, branchmerge)
701 if not branchmerge:
709 if not branchmerge:
702 repo.dirstate.setbranch(p2.branch())
710 repo.dirstate.setbranch(p2.branch())
703 finally:
711 finally:
704 wlock.release()
712 wlock.release()
705
713
706 if not partial:
714 if not partial:
707 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
715 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
708 return stats
716 return stats
General Comments 0
You need to be logged in to leave comments. Login now