##// END OF EJS Templates
dirstate: use 'm' state in otherparent to reduce ambiguity...
Matt Mackall -
r22896:7e9cbb9c default
parent child Browse files
Show More
@@ -1,928 +1,934 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid
8 from node import nullid
9 from i18n import _
9 from i18n import _
10 import scmutil, util, ignore, osutil, parsers, encoding, pathutil
10 import scmutil, util, ignore, osutil, parsers, encoding, pathutil
11 import os, stat, errno, gc
11 import os, stat, errno, gc
12
12
# Convenience aliases for frequently used helpers; _rangemask clamps
# size/mtime values into the 31-bit range the dirstate format can store.
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = 0x7fffffff

dirstatetuple = parsers.dirstatetuple
class repocache(filecache):
    """filecache for files in .hg/"""
    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg/ opener
        return obj._opener.join(fname)
23
23
class rootcache(filecache):
    """filecache for files in the repository root"""
    def join(self, obj, fname):
        # resolve fname relative to the working directory root
        return obj._join(fname)
28
28
29 class dirstate(object):
29 class dirstate(object):
30
30
31 def __init__(self, opener, ui, root, validate):
31 def __init__(self, opener, ui, root, validate):
32 '''Create a new dirstate object.
32 '''Create a new dirstate object.
33
33
34 opener is an open()-like callable that can be used to open the
34 opener is an open()-like callable that can be used to open the
35 dirstate file; root is the root of the directory tracked by
35 dirstate file; root is the root of the directory tracked by
36 the dirstate.
36 the dirstate.
37 '''
37 '''
38 self._opener = opener
38 self._opener = opener
39 self._validate = validate
39 self._validate = validate
40 self._root = root
40 self._root = root
41 self._rootdir = os.path.join(root, '')
41 self._rootdir = os.path.join(root, '')
42 self._dirty = False
42 self._dirty = False
43 self._dirtypl = False
43 self._dirtypl = False
44 self._lastnormaltime = 0
44 self._lastnormaltime = 0
45 self._ui = ui
45 self._ui = ui
46 self._filecache = {}
46 self._filecache = {}
47 self._parentwriters = 0
47 self._parentwriters = 0
48
48
49 def beginparentchange(self):
49 def beginparentchange(self):
50 '''Marks the beginning of a set of changes that involve changing
50 '''Marks the beginning of a set of changes that involve changing
51 the dirstate parents. If there is an exception during this time,
51 the dirstate parents. If there is an exception during this time,
52 the dirstate will not be written when the wlock is released. This
52 the dirstate will not be written when the wlock is released. This
53 prevents writing an incoherent dirstate where the parent doesn't
53 prevents writing an incoherent dirstate where the parent doesn't
54 match the contents.
54 match the contents.
55 '''
55 '''
56 self._parentwriters += 1
56 self._parentwriters += 1
57
57
58 def endparentchange(self):
58 def endparentchange(self):
59 '''Marks the end of a set of changes that involve changing the
59 '''Marks the end of a set of changes that involve changing the
60 dirstate parents. Once all parent changes have been marked done,
60 dirstate parents. Once all parent changes have been marked done,
61 the wlock will be free to write the dirstate on release.
61 the wlock will be free to write the dirstate on release.
62 '''
62 '''
63 if self._parentwriters > 0:
63 if self._parentwriters > 0:
64 self._parentwriters -= 1
64 self._parentwriters -= 1
65
65
66 def pendingparentchange(self):
66 def pendingparentchange(self):
67 '''Returns true if the dirstate is in the middle of a set of changes
67 '''Returns true if the dirstate is in the middle of a set of changes
68 that modify the dirstate parent.
68 that modify the dirstate parent.
69 '''
69 '''
70 return self._parentwriters > 0
70 return self._parentwriters > 0
71
71
72 @propertycache
72 @propertycache
73 def _map(self):
73 def _map(self):
74 '''Return the dirstate contents as a map from filename to
74 '''Return the dirstate contents as a map from filename to
75 (state, mode, size, time).'''
75 (state, mode, size, time).'''
76 self._read()
76 self._read()
77 return self._map
77 return self._map
78
78
79 @propertycache
79 @propertycache
80 def _copymap(self):
80 def _copymap(self):
81 self._read()
81 self._read()
82 return self._copymap
82 return self._copymap
83
83
84 @propertycache
84 @propertycache
85 def _foldmap(self):
85 def _foldmap(self):
86 f = {}
86 f = {}
87 normcase = util.normcase
87 normcase = util.normcase
88 for name, s in self._map.iteritems():
88 for name, s in self._map.iteritems():
89 if s[0] != 'r':
89 if s[0] != 'r':
90 f[normcase(name)] = name
90 f[normcase(name)] = name
91 for name in self._dirs:
91 for name in self._dirs:
92 f[normcase(name)] = name
92 f[normcase(name)] = name
93 f['.'] = '.' # prevents useless util.fspath() invocation
93 f['.'] = '.' # prevents useless util.fspath() invocation
94 return f
94 return f
95
95
96 @repocache('branch')
96 @repocache('branch')
97 def _branch(self):
97 def _branch(self):
98 try:
98 try:
99 return self._opener.read("branch").strip() or "default"
99 return self._opener.read("branch").strip() or "default"
100 except IOError, inst:
100 except IOError, inst:
101 if inst.errno != errno.ENOENT:
101 if inst.errno != errno.ENOENT:
102 raise
102 raise
103 return "default"
103 return "default"
104
104
105 @propertycache
105 @propertycache
106 def _pl(self):
106 def _pl(self):
107 try:
107 try:
108 fp = self._opener("dirstate")
108 fp = self._opener("dirstate")
109 st = fp.read(40)
109 st = fp.read(40)
110 fp.close()
110 fp.close()
111 l = len(st)
111 l = len(st)
112 if l == 40:
112 if l == 40:
113 return st[:20], st[20:40]
113 return st[:20], st[20:40]
114 elif l > 0 and l < 40:
114 elif l > 0 and l < 40:
115 raise util.Abort(_('working directory state appears damaged!'))
115 raise util.Abort(_('working directory state appears damaged!'))
116 except IOError, err:
116 except IOError, err:
117 if err.errno != errno.ENOENT:
117 if err.errno != errno.ENOENT:
118 raise
118 raise
119 return [nullid, nullid]
119 return [nullid, nullid]
120
120
121 @propertycache
121 @propertycache
122 def _dirs(self):
122 def _dirs(self):
123 return scmutil.dirs(self._map, 'r')
123 return scmutil.dirs(self._map, 'r')
124
124
125 def dirs(self):
125 def dirs(self):
126 return self._dirs
126 return self._dirs
127
127
128 @rootcache('.hgignore')
128 @rootcache('.hgignore')
129 def _ignore(self):
129 def _ignore(self):
130 files = [self._join('.hgignore')]
130 files = [self._join('.hgignore')]
131 for name, path in self._ui.configitems("ui"):
131 for name, path in self._ui.configitems("ui"):
132 if name == 'ignore' or name.startswith('ignore.'):
132 if name == 'ignore' or name.startswith('ignore.'):
133 files.append(util.expandpath(path))
133 files.append(util.expandpath(path))
134 return ignore.ignore(self._root, files, self._ui.warn)
134 return ignore.ignore(self._root, files, self._ui.warn)
135
135
136 @propertycache
136 @propertycache
137 def _slash(self):
137 def _slash(self):
138 return self._ui.configbool('ui', 'slash') and os.sep != '/'
138 return self._ui.configbool('ui', 'slash') and os.sep != '/'
139
139
140 @propertycache
140 @propertycache
141 def _checklink(self):
141 def _checklink(self):
142 return util.checklink(self._root)
142 return util.checklink(self._root)
143
143
144 @propertycache
144 @propertycache
145 def _checkexec(self):
145 def _checkexec(self):
146 return util.checkexec(self._root)
146 return util.checkexec(self._root)
147
147
148 @propertycache
148 @propertycache
149 def _checkcase(self):
149 def _checkcase(self):
150 return not util.checkcase(self._join('.hg'))
150 return not util.checkcase(self._join('.hg'))
151
151
152 def _join(self, f):
152 def _join(self, f):
153 # much faster than os.path.join()
153 # much faster than os.path.join()
154 # it's safe because f is always a relative path
154 # it's safe because f is always a relative path
155 return self._rootdir + f
155 return self._rootdir + f
156
156
157 def flagfunc(self, buildfallback):
157 def flagfunc(self, buildfallback):
158 if self._checklink and self._checkexec:
158 if self._checklink and self._checkexec:
159 def f(x):
159 def f(x):
160 try:
160 try:
161 st = os.lstat(self._join(x))
161 st = os.lstat(self._join(x))
162 if util.statislink(st):
162 if util.statislink(st):
163 return 'l'
163 return 'l'
164 if util.statisexec(st):
164 if util.statisexec(st):
165 return 'x'
165 return 'x'
166 except OSError:
166 except OSError:
167 pass
167 pass
168 return ''
168 return ''
169 return f
169 return f
170
170
171 fallback = buildfallback()
171 fallback = buildfallback()
172 if self._checklink:
172 if self._checklink:
173 def f(x):
173 def f(x):
174 if os.path.islink(self._join(x)):
174 if os.path.islink(self._join(x)):
175 return 'l'
175 return 'l'
176 if 'x' in fallback(x):
176 if 'x' in fallback(x):
177 return 'x'
177 return 'x'
178 return ''
178 return ''
179 return f
179 return f
180 if self._checkexec:
180 if self._checkexec:
181 def f(x):
181 def f(x):
182 if 'l' in fallback(x):
182 if 'l' in fallback(x):
183 return 'l'
183 return 'l'
184 if util.isexec(self._join(x)):
184 if util.isexec(self._join(x)):
185 return 'x'
185 return 'x'
186 return ''
186 return ''
187 return f
187 return f
188 else:
188 else:
189 return fallback
189 return fallback
190
190
191 @propertycache
191 @propertycache
192 def _cwd(self):
192 def _cwd(self):
193 return os.getcwd()
193 return os.getcwd()
194
194
195 def getcwd(self):
195 def getcwd(self):
196 cwd = self._cwd
196 cwd = self._cwd
197 if cwd == self._root:
197 if cwd == self._root:
198 return ''
198 return ''
199 # self._root ends with a path separator if self._root is '/' or 'C:\'
199 # self._root ends with a path separator if self._root is '/' or 'C:\'
200 rootsep = self._root
200 rootsep = self._root
201 if not util.endswithsep(rootsep):
201 if not util.endswithsep(rootsep):
202 rootsep += os.sep
202 rootsep += os.sep
203 if cwd.startswith(rootsep):
203 if cwd.startswith(rootsep):
204 return cwd[len(rootsep):]
204 return cwd[len(rootsep):]
205 else:
205 else:
206 # we're outside the repo. return an absolute path.
206 # we're outside the repo. return an absolute path.
207 return cwd
207 return cwd
208
208
209 def pathto(self, f, cwd=None):
209 def pathto(self, f, cwd=None):
210 if cwd is None:
210 if cwd is None:
211 cwd = self.getcwd()
211 cwd = self.getcwd()
212 path = util.pathto(self._root, cwd, f)
212 path = util.pathto(self._root, cwd, f)
213 if self._slash:
213 if self._slash:
214 return util.pconvert(path)
214 return util.pconvert(path)
215 return path
215 return path
216
216
217 def __getitem__(self, key):
217 def __getitem__(self, key):
218 '''Return the current state of key (a filename) in the dirstate.
218 '''Return the current state of key (a filename) in the dirstate.
219
219
220 States are:
220 States are:
221 n normal
221 n normal
222 m needs merging
222 m needs merging
223 r marked for removal
223 r marked for removal
224 a marked for addition
224 a marked for addition
225 ? not tracked
225 ? not tracked
226 '''
226 '''
227 return self._map.get(key, ("?",))[0]
227 return self._map.get(key, ("?",))[0]
228
228
229 def __contains__(self, key):
229 def __contains__(self, key):
230 return key in self._map
230 return key in self._map
231
231
232 def __iter__(self):
232 def __iter__(self):
233 for x in sorted(self._map):
233 for x in sorted(self._map):
234 yield x
234 yield x
235
235
236 def iteritems(self):
236 def iteritems(self):
237 return self._map.iteritems()
237 return self._map.iteritems()
238
238
239 def parents(self):
239 def parents(self):
240 return [self._validate(p) for p in self._pl]
240 return [self._validate(p) for p in self._pl]
241
241
242 def p1(self):
242 def p1(self):
243 return self._validate(self._pl[0])
243 return self._validate(self._pl[0])
244
244
245 def p2(self):
245 def p2(self):
246 return self._validate(self._pl[1])
246 return self._validate(self._pl[1])
247
247
248 def branch(self):
248 def branch(self):
249 return encoding.tolocal(self._branch)
249 return encoding.tolocal(self._branch)
250
250
251 def setparents(self, p1, p2=nullid):
251 def setparents(self, p1, p2=nullid):
252 """Set dirstate parents to p1 and p2.
252 """Set dirstate parents to p1 and p2.
253
253
254 When moving from two parents to one, 'm' merged entries a
254 When moving from two parents to one, 'm' merged entries a
255 adjusted to normal and previous copy records discarded and
255 adjusted to normal and previous copy records discarded and
256 returned by the call.
256 returned by the call.
257
257
258 See localrepo.setparents()
258 See localrepo.setparents()
259 """
259 """
260 if self._parentwriters == 0:
260 if self._parentwriters == 0:
261 raise ValueError("cannot set dirstate parent without "
261 raise ValueError("cannot set dirstate parent without "
262 "calling dirstate.beginparentchange")
262 "calling dirstate.beginparentchange")
263
263
264 self._dirty = self._dirtypl = True
264 self._dirty = self._dirtypl = True
265 oldp2 = self._pl[1]
265 oldp2 = self._pl[1]
266 self._pl = p1, p2
266 self._pl = p1, p2
267 copies = {}
267 copies = {}
268 if oldp2 != nullid and p2 == nullid:
268 if oldp2 != nullid and p2 == nullid:
269 for f, s in self._map.iteritems():
269 for f, s in self._map.iteritems():
270 # Discard 'm' markers when moving away from a merge state
270 # Discard 'm' markers when moving away from a merge state
271 if s[0] == 'm':
271 if s[0] == 'm':
272 if f in self._copymap:
272 if f in self._copymap:
273 copies[f] = self._copymap[f]
273 copies[f] = self._copymap[f]
274 self.normallookup(f)
274 self.normallookup(f)
275 # Also fix up otherparent markers
275 # Also fix up otherparent markers
276 elif s[0] == 'n' and s[2] == -2:
276 elif s[0] == 'n' and s[2] == -2:
277 if f in self._copymap:
277 if f in self._copymap:
278 copies[f] = self._copymap[f]
278 copies[f] = self._copymap[f]
279 self.add(f)
279 self.add(f)
280 return copies
280 return copies
281
281
282 def setbranch(self, branch):
282 def setbranch(self, branch):
283 self._branch = encoding.fromlocal(branch)
283 self._branch = encoding.fromlocal(branch)
284 f = self._opener('branch', 'w', atomictemp=True)
284 f = self._opener('branch', 'w', atomictemp=True)
285 try:
285 try:
286 f.write(self._branch + '\n')
286 f.write(self._branch + '\n')
287 f.close()
287 f.close()
288
288
289 # make sure filecache has the correct stat info for _branch after
289 # make sure filecache has the correct stat info for _branch after
290 # replacing the underlying file
290 # replacing the underlying file
291 ce = self._filecache['_branch']
291 ce = self._filecache['_branch']
292 if ce:
292 if ce:
293 ce.refresh()
293 ce.refresh()
294 except: # re-raises
294 except: # re-raises
295 f.discard()
295 f.discard()
296 raise
296 raise
297
297
298 def _read(self):
298 def _read(self):
299 self._map = {}
299 self._map = {}
300 self._copymap = {}
300 self._copymap = {}
301 try:
301 try:
302 st = self._opener.read("dirstate")
302 st = self._opener.read("dirstate")
303 except IOError, err:
303 except IOError, err:
304 if err.errno != errno.ENOENT:
304 if err.errno != errno.ENOENT:
305 raise
305 raise
306 return
306 return
307 if not st:
307 if not st:
308 return
308 return
309
309
310 # Python's garbage collector triggers a GC each time a certain number
310 # Python's garbage collector triggers a GC each time a certain number
311 # of container objects (the number being defined by
311 # of container objects (the number being defined by
312 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
312 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
313 # for each file in the dirstate. The C version then immediately marks
313 # for each file in the dirstate. The C version then immediately marks
314 # them as not to be tracked by the collector. However, this has no
314 # them as not to be tracked by the collector. However, this has no
315 # effect on when GCs are triggered, only on what objects the GC looks
315 # effect on when GCs are triggered, only on what objects the GC looks
316 # into. This means that O(number of files) GCs are unavoidable.
316 # into. This means that O(number of files) GCs are unavoidable.
317 # Depending on when in the process's lifetime the dirstate is parsed,
317 # Depending on when in the process's lifetime the dirstate is parsed,
318 # this can get very expensive. As a workaround, disable GC while
318 # this can get very expensive. As a workaround, disable GC while
319 # parsing the dirstate.
319 # parsing the dirstate.
320 gcenabled = gc.isenabled()
320 gcenabled = gc.isenabled()
321 gc.disable()
321 gc.disable()
322 try:
322 try:
323 p = parsers.parse_dirstate(self._map, self._copymap, st)
323 p = parsers.parse_dirstate(self._map, self._copymap, st)
324 finally:
324 finally:
325 if gcenabled:
325 if gcenabled:
326 gc.enable()
326 gc.enable()
327 if not self._dirtypl:
327 if not self._dirtypl:
328 self._pl = p
328 self._pl = p
329
329
330 def invalidate(self):
330 def invalidate(self):
331 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
331 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
332 "_ignore"):
332 "_ignore"):
333 if a in self.__dict__:
333 if a in self.__dict__:
334 delattr(self, a)
334 delattr(self, a)
335 self._lastnormaltime = 0
335 self._lastnormaltime = 0
336 self._dirty = False
336 self._dirty = False
337 self._parentwriters = 0
337 self._parentwriters = 0
338
338
339 def copy(self, source, dest):
339 def copy(self, source, dest):
340 """Mark dest as a copy of source. Unmark dest if source is None."""
340 """Mark dest as a copy of source. Unmark dest if source is None."""
341 if source == dest:
341 if source == dest:
342 return
342 return
343 self._dirty = True
343 self._dirty = True
344 if source is not None:
344 if source is not None:
345 self._copymap[dest] = source
345 self._copymap[dest] = source
346 elif dest in self._copymap:
346 elif dest in self._copymap:
347 del self._copymap[dest]
347 del self._copymap[dest]
348
348
349 def copied(self, file):
349 def copied(self, file):
350 return self._copymap.get(file, None)
350 return self._copymap.get(file, None)
351
351
352 def copies(self):
352 def copies(self):
353 return self._copymap
353 return self._copymap
354
354
355 def _droppath(self, f):
355 def _droppath(self, f):
356 if self[f] not in "?r" and "_dirs" in self.__dict__:
356 if self[f] not in "?r" and "_dirs" in self.__dict__:
357 self._dirs.delpath(f)
357 self._dirs.delpath(f)
358
358
359 def _addpath(self, f, state, mode, size, mtime):
359 def _addpath(self, f, state, mode, size, mtime):
360 oldstate = self[f]
360 oldstate = self[f]
361 if state == 'a' or oldstate == 'r':
361 if state == 'a' or oldstate == 'r':
362 scmutil.checkfilename(f)
362 scmutil.checkfilename(f)
363 if f in self._dirs:
363 if f in self._dirs:
364 raise util.Abort(_('directory %r already in dirstate') % f)
364 raise util.Abort(_('directory %r already in dirstate') % f)
365 # shadows
365 # shadows
366 for d in scmutil.finddirs(f):
366 for d in scmutil.finddirs(f):
367 if d in self._dirs:
367 if d in self._dirs:
368 break
368 break
369 if d in self._map and self[d] != 'r':
369 if d in self._map and self[d] != 'r':
370 raise util.Abort(
370 raise util.Abort(
371 _('file %r in dirstate clashes with %r') % (d, f))
371 _('file %r in dirstate clashes with %r') % (d, f))
372 if oldstate in "?r" and "_dirs" in self.__dict__:
372 if oldstate in "?r" and "_dirs" in self.__dict__:
373 self._dirs.addpath(f)
373 self._dirs.addpath(f)
374 self._dirty = True
374 self._dirty = True
375 self._map[f] = dirstatetuple(state, mode, size, mtime)
375 self._map[f] = dirstatetuple(state, mode, size, mtime)
376
376
377 def normal(self, f):
377 def normal(self, f):
378 '''Mark a file normal and clean.'''
378 '''Mark a file normal and clean.'''
379 s = os.lstat(self._join(f))
379 s = os.lstat(self._join(f))
380 mtime = int(s.st_mtime)
380 mtime = int(s.st_mtime)
381 self._addpath(f, 'n', s.st_mode,
381 self._addpath(f, 'n', s.st_mode,
382 s.st_size & _rangemask, mtime & _rangemask)
382 s.st_size & _rangemask, mtime & _rangemask)
383 if f in self._copymap:
383 if f in self._copymap:
384 del self._copymap[f]
384 del self._copymap[f]
385 if mtime > self._lastnormaltime:
385 if mtime > self._lastnormaltime:
386 # Remember the most recent modification timeslot for status(),
386 # Remember the most recent modification timeslot for status(),
387 # to make sure we won't miss future size-preserving file content
387 # to make sure we won't miss future size-preserving file content
388 # modifications that happen within the same timeslot.
388 # modifications that happen within the same timeslot.
389 self._lastnormaltime = mtime
389 self._lastnormaltime = mtime
390
390
391 def normallookup(self, f):
391 def normallookup(self, f):
392 '''Mark a file normal, but possibly dirty.'''
392 '''Mark a file normal, but possibly dirty.'''
393 if self._pl[1] != nullid and f in self._map:
393 if self._pl[1] != nullid and f in self._map:
394 # if there is a merge going on and the file was either
394 # if there is a merge going on and the file was either
395 # in state 'm' (-1) or coming from other parent (-2) before
395 # in state 'm' (-1) or coming from other parent (-2) before
396 # being removed, restore that state.
396 # being removed, restore that state.
397 entry = self._map[f]
397 entry = self._map[f]
398 if entry[0] == 'r' and entry[2] in (-1, -2):
398 if entry[0] == 'r' and entry[2] in (-1, -2):
399 source = self._copymap.get(f)
399 source = self._copymap.get(f)
400 if entry[2] == -1:
400 if entry[2] == -1:
401 self.merge(f)
401 self.merge(f)
402 elif entry[2] == -2:
402 elif entry[2] == -2:
403 self.otherparent(f)
403 self.otherparent(f)
404 if source:
404 if source:
405 self.copy(source, f)
405 self.copy(source, f)
406 return
406 return
407 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
407 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
408 return
408 return
409 self._addpath(f, 'n', 0, -1, -1)
409 self._addpath(f, 'n', 0, -1, -1)
410 if f in self._copymap:
410 if f in self._copymap:
411 del self._copymap[f]
411 del self._copymap[f]
412
412
413 def otherparent(self, f):
413 def otherparent(self, f):
414 '''Mark as coming from the other parent, always dirty.'''
414 '''Mark as coming from the other parent, always dirty.'''
415 if self._pl[1] == nullid:
415 if self._pl[1] == nullid:
416 raise util.Abort(_("setting %r to other parent "
416 raise util.Abort(_("setting %r to other parent "
417 "only allowed in merges") % f)
417 "only allowed in merges") % f)
418 self._addpath(f, 'n', 0, -2, -1)
418 if f in self and self[f] == 'n':
419 # merge-like
420 self._addpath(f, 'm', 0, -2, -1)
421 else:
422 # add-like
423 self._addpath(f, 'n', 0, -2, -1)
424
419 if f in self._copymap:
425 if f in self._copymap:
420 del self._copymap[f]
426 del self._copymap[f]
421
427
422 def add(self, f):
428 def add(self, f):
423 '''Mark a file added.'''
429 '''Mark a file added.'''
424 self._addpath(f, 'a', 0, -1, -1)
430 self._addpath(f, 'a', 0, -1, -1)
425 if f in self._copymap:
431 if f in self._copymap:
426 del self._copymap[f]
432 del self._copymap[f]
427
433
428 def remove(self, f):
434 def remove(self, f):
429 '''Mark a file removed.'''
435 '''Mark a file removed.'''
430 self._dirty = True
436 self._dirty = True
431 self._droppath(f)
437 self._droppath(f)
432 size = 0
438 size = 0
433 if self._pl[1] != nullid and f in self._map:
439 if self._pl[1] != nullid and f in self._map:
434 # backup the previous state
440 # backup the previous state
435 entry = self._map[f]
441 entry = self._map[f]
436 if entry[0] == 'm': # merge
442 if entry[0] == 'm': # merge
437 size = -1
443 size = -1
438 elif entry[0] == 'n' and entry[2] == -2: # other parent
444 elif entry[0] == 'n' and entry[2] == -2: # other parent
439 size = -2
445 size = -2
440 self._map[f] = dirstatetuple('r', 0, size, 0)
446 self._map[f] = dirstatetuple('r', 0, size, 0)
441 if size == 0 and f in self._copymap:
447 if size == 0 and f in self._copymap:
442 del self._copymap[f]
448 del self._copymap[f]
443
449
444 def merge(self, f):
450 def merge(self, f):
445 '''Mark a file merged.'''
451 '''Mark a file merged.'''
446 if self._pl[1] == nullid:
452 if self._pl[1] == nullid:
447 return self.normallookup(f)
453 return self.normallookup(f)
448 s = os.lstat(self._join(f))
454 s = os.lstat(self._join(f))
449 self._addpath(f, 'm', s.st_mode,
455 self._addpath(f, 'm', s.st_mode,
450 s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
456 s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
451 if f in self._copymap:
457 if f in self._copymap:
452 del self._copymap[f]
458 del self._copymap[f]
453
459
454 def drop(self, f):
460 def drop(self, f):
455 '''Drop a file from the dirstate'''
461 '''Drop a file from the dirstate'''
456 if f in self._map:
462 if f in self._map:
457 self._dirty = True
463 self._dirty = True
458 self._droppath(f)
464 self._droppath(f)
459 del self._map[f]
465 del self._map[f]
460
466
461 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
467 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
462 normed = util.normcase(path)
468 normed = util.normcase(path)
463 folded = self._foldmap.get(normed, None)
469 folded = self._foldmap.get(normed, None)
464 if folded is None:
470 if folded is None:
465 if isknown:
471 if isknown:
466 folded = path
472 folded = path
467 else:
473 else:
468 if exists is None:
474 if exists is None:
469 exists = os.path.lexists(os.path.join(self._root, path))
475 exists = os.path.lexists(os.path.join(self._root, path))
470 if not exists:
476 if not exists:
471 # Maybe a path component exists
477 # Maybe a path component exists
472 if not ignoremissing and '/' in path:
478 if not ignoremissing and '/' in path:
473 d, f = path.rsplit('/', 1)
479 d, f = path.rsplit('/', 1)
474 d = self._normalize(d, isknown, ignoremissing, None)
480 d = self._normalize(d, isknown, ignoremissing, None)
475 folded = d + "/" + f
481 folded = d + "/" + f
476 else:
482 else:
477 # No path components, preserve original case
483 # No path components, preserve original case
478 folded = path
484 folded = path
479 else:
485 else:
480 # recursively normalize leading directory components
486 # recursively normalize leading directory components
481 # against dirstate
487 # against dirstate
482 if '/' in normed:
488 if '/' in normed:
483 d, f = normed.rsplit('/', 1)
489 d, f = normed.rsplit('/', 1)
484 d = self._normalize(d, isknown, ignoremissing, True)
490 d = self._normalize(d, isknown, ignoremissing, True)
485 r = self._root + "/" + d
491 r = self._root + "/" + d
486 folded = d + "/" + util.fspath(f, r)
492 folded = d + "/" + util.fspath(f, r)
487 else:
493 else:
488 folded = util.fspath(normed, self._root)
494 folded = util.fspath(normed, self._root)
489 self._foldmap[normed] = folded
495 self._foldmap[normed] = folded
490
496
491 return folded
497 return folded
492
498
493 def normalize(self, path, isknown=False, ignoremissing=False):
499 def normalize(self, path, isknown=False, ignoremissing=False):
494 '''
500 '''
495 normalize the case of a pathname when on a casefolding filesystem
501 normalize the case of a pathname when on a casefolding filesystem
496
502
497 isknown specifies whether the filename came from walking the
503 isknown specifies whether the filename came from walking the
498 disk, to avoid extra filesystem access.
504 disk, to avoid extra filesystem access.
499
505
500 If ignoremissing is True, missing path are returned
506 If ignoremissing is True, missing path are returned
501 unchanged. Otherwise, we try harder to normalize possibly
507 unchanged. Otherwise, we try harder to normalize possibly
502 existing path components.
508 existing path components.
503
509
504 The normalized case is determined based on the following precedence:
510 The normalized case is determined based on the following precedence:
505
511
506 - version of name already stored in the dirstate
512 - version of name already stored in the dirstate
507 - version of name stored on disk
513 - version of name stored on disk
508 - version provided via command arguments
514 - version provided via command arguments
509 '''
515 '''
510
516
511 if self._checkcase:
517 if self._checkcase:
512 return self._normalize(path, isknown, ignoremissing)
518 return self._normalize(path, isknown, ignoremissing)
513 return path
519 return path
514
520
515 def clear(self):
521 def clear(self):
516 self._map = {}
522 self._map = {}
517 if "_dirs" in self.__dict__:
523 if "_dirs" in self.__dict__:
518 delattr(self, "_dirs")
524 delattr(self, "_dirs")
519 self._copymap = {}
525 self._copymap = {}
520 self._pl = [nullid, nullid]
526 self._pl = [nullid, nullid]
521 self._lastnormaltime = 0
527 self._lastnormaltime = 0
522 self._dirty = True
528 self._dirty = True
523
529
524 def rebuild(self, parent, allfiles, changedfiles=None):
530 def rebuild(self, parent, allfiles, changedfiles=None):
525 changedfiles = changedfiles or allfiles
531 changedfiles = changedfiles or allfiles
526 oldmap = self._map
532 oldmap = self._map
527 self.clear()
533 self.clear()
528 for f in allfiles:
534 for f in allfiles:
529 if f not in changedfiles:
535 if f not in changedfiles:
530 self._map[f] = oldmap[f]
536 self._map[f] = oldmap[f]
531 else:
537 else:
532 if 'x' in allfiles.flags(f):
538 if 'x' in allfiles.flags(f):
533 self._map[f] = dirstatetuple('n', 0777, -1, 0)
539 self._map[f] = dirstatetuple('n', 0777, -1, 0)
534 else:
540 else:
535 self._map[f] = dirstatetuple('n', 0666, -1, 0)
541 self._map[f] = dirstatetuple('n', 0666, -1, 0)
536 self._pl = (parent, nullid)
542 self._pl = (parent, nullid)
537 self._dirty = True
543 self._dirty = True
538
544
539 def write(self):
545 def write(self):
540 if not self._dirty:
546 if not self._dirty:
541 return
547 return
542
548
543 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
549 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
544 # timestamp of each entries in dirstate, because of 'now > mtime'
550 # timestamp of each entries in dirstate, because of 'now > mtime'
545 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
551 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
546 if delaywrite:
552 if delaywrite:
547 import time # to avoid useless import
553 import time # to avoid useless import
548 time.sleep(delaywrite)
554 time.sleep(delaywrite)
549
555
550 st = self._opener("dirstate", "w", atomictemp=True)
556 st = self._opener("dirstate", "w", atomictemp=True)
551 # use the modification time of the newly created temporary file as the
557 # use the modification time of the newly created temporary file as the
552 # filesystem's notion of 'now'
558 # filesystem's notion of 'now'
553 now = util.fstat(st).st_mtime
559 now = util.fstat(st).st_mtime
554 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
560 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
555 st.close()
561 st.close()
556 self._lastnormaltime = 0
562 self._lastnormaltime = 0
557 self._dirty = self._dirtypl = False
563 self._dirty = self._dirtypl = False
558
564
559 def _dirignore(self, f):
565 def _dirignore(self, f):
560 if f == '.':
566 if f == '.':
561 return False
567 return False
562 if self._ignore(f):
568 if self._ignore(f):
563 return True
569 return True
564 for p in scmutil.finddirs(f):
570 for p in scmutil.finddirs(f):
565 if self._ignore(p):
571 if self._ignore(p):
566 return True
572 return True
567 return False
573 return False
568
574
569 def _walkexplicit(self, match, subrepos):
575 def _walkexplicit(self, match, subrepos):
570 '''Get stat data about the files explicitly specified by match.
576 '''Get stat data about the files explicitly specified by match.
571
577
572 Return a triple (results, dirsfound, dirsnotfound).
578 Return a triple (results, dirsfound, dirsnotfound).
573 - results is a mapping from filename to stat result. It also contains
579 - results is a mapping from filename to stat result. It also contains
574 listings mapping subrepos and .hg to None.
580 listings mapping subrepos and .hg to None.
575 - dirsfound is a list of files found to be directories.
581 - dirsfound is a list of files found to be directories.
576 - dirsnotfound is a list of files that the dirstate thinks are
582 - dirsnotfound is a list of files that the dirstate thinks are
577 directories and that were not found.'''
583 directories and that were not found.'''
578
584
579 def badtype(mode):
585 def badtype(mode):
580 kind = _('unknown')
586 kind = _('unknown')
581 if stat.S_ISCHR(mode):
587 if stat.S_ISCHR(mode):
582 kind = _('character device')
588 kind = _('character device')
583 elif stat.S_ISBLK(mode):
589 elif stat.S_ISBLK(mode):
584 kind = _('block device')
590 kind = _('block device')
585 elif stat.S_ISFIFO(mode):
591 elif stat.S_ISFIFO(mode):
586 kind = _('fifo')
592 kind = _('fifo')
587 elif stat.S_ISSOCK(mode):
593 elif stat.S_ISSOCK(mode):
588 kind = _('socket')
594 kind = _('socket')
589 elif stat.S_ISDIR(mode):
595 elif stat.S_ISDIR(mode):
590 kind = _('directory')
596 kind = _('directory')
591 return _('unsupported file type (type is %s)') % kind
597 return _('unsupported file type (type is %s)') % kind
592
598
593 matchedir = match.explicitdir
599 matchedir = match.explicitdir
594 badfn = match.bad
600 badfn = match.bad
595 dmap = self._map
601 dmap = self._map
596 normpath = util.normpath
602 normpath = util.normpath
597 lstat = os.lstat
603 lstat = os.lstat
598 getkind = stat.S_IFMT
604 getkind = stat.S_IFMT
599 dirkind = stat.S_IFDIR
605 dirkind = stat.S_IFDIR
600 regkind = stat.S_IFREG
606 regkind = stat.S_IFREG
601 lnkkind = stat.S_IFLNK
607 lnkkind = stat.S_IFLNK
602 join = self._join
608 join = self._join
603 dirsfound = []
609 dirsfound = []
604 foundadd = dirsfound.append
610 foundadd = dirsfound.append
605 dirsnotfound = []
611 dirsnotfound = []
606 notfoundadd = dirsnotfound.append
612 notfoundadd = dirsnotfound.append
607
613
608 if match.matchfn != match.exact and self._checkcase:
614 if match.matchfn != match.exact and self._checkcase:
609 normalize = self._normalize
615 normalize = self._normalize
610 else:
616 else:
611 normalize = None
617 normalize = None
612
618
613 files = sorted(match.files())
619 files = sorted(match.files())
614 subrepos.sort()
620 subrepos.sort()
615 i, j = 0, 0
621 i, j = 0, 0
616 while i < len(files) and j < len(subrepos):
622 while i < len(files) and j < len(subrepos):
617 subpath = subrepos[j] + "/"
623 subpath = subrepos[j] + "/"
618 if files[i] < subpath:
624 if files[i] < subpath:
619 i += 1
625 i += 1
620 continue
626 continue
621 while i < len(files) and files[i].startswith(subpath):
627 while i < len(files) and files[i].startswith(subpath):
622 del files[i]
628 del files[i]
623 j += 1
629 j += 1
624
630
625 if not files or '.' in files:
631 if not files or '.' in files:
626 files = ['']
632 files = ['']
627 results = dict.fromkeys(subrepos)
633 results = dict.fromkeys(subrepos)
628 results['.hg'] = None
634 results['.hg'] = None
629
635
630 for ff in files:
636 for ff in files:
631 if normalize:
637 if normalize:
632 nf = normalize(normpath(ff), False, True)
638 nf = normalize(normpath(ff), False, True)
633 else:
639 else:
634 nf = normpath(ff)
640 nf = normpath(ff)
635 if nf in results:
641 if nf in results:
636 continue
642 continue
637
643
638 try:
644 try:
639 st = lstat(join(nf))
645 st = lstat(join(nf))
640 kind = getkind(st.st_mode)
646 kind = getkind(st.st_mode)
641 if kind == dirkind:
647 if kind == dirkind:
642 if nf in dmap:
648 if nf in dmap:
643 # file replaced by dir on disk but still in dirstate
649 # file replaced by dir on disk but still in dirstate
644 results[nf] = None
650 results[nf] = None
645 if matchedir:
651 if matchedir:
646 matchedir(nf)
652 matchedir(nf)
647 foundadd(nf)
653 foundadd(nf)
648 elif kind == regkind or kind == lnkkind:
654 elif kind == regkind or kind == lnkkind:
649 results[nf] = st
655 results[nf] = st
650 else:
656 else:
651 badfn(ff, badtype(kind))
657 badfn(ff, badtype(kind))
652 if nf in dmap:
658 if nf in dmap:
653 results[nf] = None
659 results[nf] = None
654 except OSError, inst: # nf not found on disk - it is dirstate only
660 except OSError, inst: # nf not found on disk - it is dirstate only
655 if nf in dmap: # does it exactly match a missing file?
661 if nf in dmap: # does it exactly match a missing file?
656 results[nf] = None
662 results[nf] = None
657 else: # does it match a missing directory?
663 else: # does it match a missing directory?
658 prefix = nf + "/"
664 prefix = nf + "/"
659 for fn in dmap:
665 for fn in dmap:
660 if fn.startswith(prefix):
666 if fn.startswith(prefix):
661 if matchedir:
667 if matchedir:
662 matchedir(nf)
668 matchedir(nf)
663 notfoundadd(nf)
669 notfoundadd(nf)
664 break
670 break
665 else:
671 else:
666 badfn(ff, inst.strerror)
672 badfn(ff, inst.strerror)
667
673
668 return results, dirsfound, dirsnotfound
674 return results, dirsfound, dirsnotfound
669
675
670 def walk(self, match, subrepos, unknown, ignored, full=True):
676 def walk(self, match, subrepos, unknown, ignored, full=True):
671 '''
677 '''
672 Walk recursively through the directory tree, finding all files
678 Walk recursively through the directory tree, finding all files
673 matched by match.
679 matched by match.
674
680
675 If full is False, maybe skip some known-clean files.
681 If full is False, maybe skip some known-clean files.
676
682
677 Return a dict mapping filename to stat-like object (either
683 Return a dict mapping filename to stat-like object (either
678 mercurial.osutil.stat instance or return value of os.stat()).
684 mercurial.osutil.stat instance or return value of os.stat()).
679
685
680 '''
686 '''
681 # full is a flag that extensions that hook into walk can use -- this
687 # full is a flag that extensions that hook into walk can use -- this
682 # implementation doesn't use it at all. This satisfies the contract
688 # implementation doesn't use it at all. This satisfies the contract
683 # because we only guarantee a "maybe".
689 # because we only guarantee a "maybe".
684
690
685 if ignored:
691 if ignored:
686 ignore = util.never
692 ignore = util.never
687 dirignore = util.never
693 dirignore = util.never
688 elif unknown:
694 elif unknown:
689 ignore = self._ignore
695 ignore = self._ignore
690 dirignore = self._dirignore
696 dirignore = self._dirignore
691 else:
697 else:
692 # if not unknown and not ignored, drop dir recursion and step 2
698 # if not unknown and not ignored, drop dir recursion and step 2
693 ignore = util.always
699 ignore = util.always
694 dirignore = util.always
700 dirignore = util.always
695
701
696 matchfn = match.matchfn
702 matchfn = match.matchfn
697 matchalways = match.always()
703 matchalways = match.always()
698 matchtdir = match.traversedir
704 matchtdir = match.traversedir
699 dmap = self._map
705 dmap = self._map
700 listdir = osutil.listdir
706 listdir = osutil.listdir
701 lstat = os.lstat
707 lstat = os.lstat
702 dirkind = stat.S_IFDIR
708 dirkind = stat.S_IFDIR
703 regkind = stat.S_IFREG
709 regkind = stat.S_IFREG
704 lnkkind = stat.S_IFLNK
710 lnkkind = stat.S_IFLNK
705 join = self._join
711 join = self._join
706
712
707 exact = skipstep3 = False
713 exact = skipstep3 = False
708 if matchfn == match.exact: # match.exact
714 if matchfn == match.exact: # match.exact
709 exact = True
715 exact = True
710 dirignore = util.always # skip step 2
716 dirignore = util.always # skip step 2
711 elif match.files() and not match.anypats(): # match.match, no patterns
717 elif match.files() and not match.anypats(): # match.match, no patterns
712 skipstep3 = True
718 skipstep3 = True
713
719
714 if not exact and self._checkcase:
720 if not exact and self._checkcase:
715 normalize = self._normalize
721 normalize = self._normalize
716 skipstep3 = False
722 skipstep3 = False
717 else:
723 else:
718 normalize = None
724 normalize = None
719
725
720 # step 1: find all explicit files
726 # step 1: find all explicit files
721 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
727 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
722
728
723 skipstep3 = skipstep3 and not (work or dirsnotfound)
729 skipstep3 = skipstep3 and not (work or dirsnotfound)
724 work = [d for d in work if not dirignore(d)]
730 work = [d for d in work if not dirignore(d)]
725 wadd = work.append
731 wadd = work.append
726
732
727 # step 2: visit subdirectories
733 # step 2: visit subdirectories
728 while work:
734 while work:
729 nd = work.pop()
735 nd = work.pop()
730 skip = None
736 skip = None
731 if nd == '.':
737 if nd == '.':
732 nd = ''
738 nd = ''
733 else:
739 else:
734 skip = '.hg'
740 skip = '.hg'
735 try:
741 try:
736 entries = listdir(join(nd), stat=True, skip=skip)
742 entries = listdir(join(nd), stat=True, skip=skip)
737 except OSError, inst:
743 except OSError, inst:
738 if inst.errno in (errno.EACCES, errno.ENOENT):
744 if inst.errno in (errno.EACCES, errno.ENOENT):
739 match.bad(self.pathto(nd), inst.strerror)
745 match.bad(self.pathto(nd), inst.strerror)
740 continue
746 continue
741 raise
747 raise
742 for f, kind, st in entries:
748 for f, kind, st in entries:
743 if normalize:
749 if normalize:
744 nf = normalize(nd and (nd + "/" + f) or f, True, True)
750 nf = normalize(nd and (nd + "/" + f) or f, True, True)
745 else:
751 else:
746 nf = nd and (nd + "/" + f) or f
752 nf = nd and (nd + "/" + f) or f
747 if nf not in results:
753 if nf not in results:
748 if kind == dirkind:
754 if kind == dirkind:
749 if not ignore(nf):
755 if not ignore(nf):
750 if matchtdir:
756 if matchtdir:
751 matchtdir(nf)
757 matchtdir(nf)
752 wadd(nf)
758 wadd(nf)
753 if nf in dmap and (matchalways or matchfn(nf)):
759 if nf in dmap and (matchalways or matchfn(nf)):
754 results[nf] = None
760 results[nf] = None
755 elif kind == regkind or kind == lnkkind:
761 elif kind == regkind or kind == lnkkind:
756 if nf in dmap:
762 if nf in dmap:
757 if matchalways or matchfn(nf):
763 if matchalways or matchfn(nf):
758 results[nf] = st
764 results[nf] = st
759 elif (matchalways or matchfn(nf)) and not ignore(nf):
765 elif (matchalways or matchfn(nf)) and not ignore(nf):
760 results[nf] = st
766 results[nf] = st
761 elif nf in dmap and (matchalways or matchfn(nf)):
767 elif nf in dmap and (matchalways or matchfn(nf)):
762 results[nf] = None
768 results[nf] = None
763
769
764 for s in subrepos:
770 for s in subrepos:
765 del results[s]
771 del results[s]
766 del results['.hg']
772 del results['.hg']
767
773
768 # step 3: visit remaining files from dmap
774 # step 3: visit remaining files from dmap
769 if not skipstep3 and not exact:
775 if not skipstep3 and not exact:
770 # If a dmap file is not in results yet, it was either
776 # If a dmap file is not in results yet, it was either
771 # a) not matching matchfn b) ignored, c) missing, or d) under a
777 # a) not matching matchfn b) ignored, c) missing, or d) under a
772 # symlink directory.
778 # symlink directory.
773 if not results and matchalways:
779 if not results and matchalways:
774 visit = dmap.keys()
780 visit = dmap.keys()
775 else:
781 else:
776 visit = [f for f in dmap if f not in results and matchfn(f)]
782 visit = [f for f in dmap if f not in results and matchfn(f)]
777 visit.sort()
783 visit.sort()
778
784
779 if unknown:
785 if unknown:
780 # unknown == True means we walked all dirs under the roots
786 # unknown == True means we walked all dirs under the roots
781 # that wasn't ignored, and everything that matched was stat'ed
787 # that wasn't ignored, and everything that matched was stat'ed
782 # and is already in results.
788 # and is already in results.
783 # The rest must thus be ignored or under a symlink.
789 # The rest must thus be ignored or under a symlink.
784 audit_path = pathutil.pathauditor(self._root)
790 audit_path = pathutil.pathauditor(self._root)
785
791
786 for nf in iter(visit):
792 for nf in iter(visit):
787 # Report ignored items in the dmap as long as they are not
793 # Report ignored items in the dmap as long as they are not
788 # under a symlink directory.
794 # under a symlink directory.
789 if audit_path.check(nf):
795 if audit_path.check(nf):
790 try:
796 try:
791 results[nf] = lstat(join(nf))
797 results[nf] = lstat(join(nf))
792 # file was just ignored, no links, and exists
798 # file was just ignored, no links, and exists
793 except OSError:
799 except OSError:
794 # file doesn't exist
800 # file doesn't exist
795 results[nf] = None
801 results[nf] = None
796 else:
802 else:
797 # It's either missing or under a symlink directory
803 # It's either missing or under a symlink directory
798 # which we in this case report as missing
804 # which we in this case report as missing
799 results[nf] = None
805 results[nf] = None
800 else:
806 else:
801 # We may not have walked the full directory tree above,
807 # We may not have walked the full directory tree above,
802 # so stat and check everything we missed.
808 # so stat and check everything we missed.
803 nf = iter(visit).next
809 nf = iter(visit).next
804 for st in util.statfiles([join(i) for i in visit]):
810 for st in util.statfiles([join(i) for i in visit]):
805 results[nf()] = st
811 results[nf()] = st
806 return results
812 return results
807
813
808 def status(self, match, subrepos, ignored, clean, unknown):
814 def status(self, match, subrepos, ignored, clean, unknown):
809 '''Determine the status of the working copy relative to the
815 '''Determine the status of the working copy relative to the
810 dirstate and return a tuple of lists (unsure, modified, added,
816 dirstate and return a tuple of lists (unsure, modified, added,
811 removed, deleted, unknown, ignored, clean), where:
817 removed, deleted, unknown, ignored, clean), where:
812
818
813 unsure:
819 unsure:
814 files that might have been modified since the dirstate was
820 files that might have been modified since the dirstate was
815 written, but need to be read to be sure (size is the same
821 written, but need to be read to be sure (size is the same
816 but mtime differs)
822 but mtime differs)
817 modified:
823 modified:
818 files that have definitely been modified since the dirstate
824 files that have definitely been modified since the dirstate
819 was written (different size or mode)
825 was written (different size or mode)
820 added:
826 added:
821 files that have been explicitly added with hg add
827 files that have been explicitly added with hg add
822 removed:
828 removed:
823 files that have been explicitly removed with hg remove
829 files that have been explicitly removed with hg remove
824 deleted:
830 deleted:
825 files that have been deleted through other means ("missing")
831 files that have been deleted through other means ("missing")
826 unknown:
832 unknown:
827 files not in the dirstate that are not ignored
833 files not in the dirstate that are not ignored
828 ignored:
834 ignored:
829 files not in the dirstate that are ignored
835 files not in the dirstate that are ignored
830 (by _dirignore())
836 (by _dirignore())
831 clean:
837 clean:
832 files that have definitely not been modified since the
838 files that have definitely not been modified since the
833 dirstate was written
839 dirstate was written
834 '''
840 '''
835 listignored, listclean, listunknown = ignored, clean, unknown
841 listignored, listclean, listunknown = ignored, clean, unknown
836 lookup, modified, added, unknown, ignored = [], [], [], [], []
842 lookup, modified, added, unknown, ignored = [], [], [], [], []
837 removed, deleted, clean = [], [], []
843 removed, deleted, clean = [], [], []
838
844
839 dmap = self._map
845 dmap = self._map
840 ladd = lookup.append # aka "unsure"
846 ladd = lookup.append # aka "unsure"
841 madd = modified.append
847 madd = modified.append
842 aadd = added.append
848 aadd = added.append
843 uadd = unknown.append
849 uadd = unknown.append
844 iadd = ignored.append
850 iadd = ignored.append
845 radd = removed.append
851 radd = removed.append
846 dadd = deleted.append
852 dadd = deleted.append
847 cadd = clean.append
853 cadd = clean.append
848 mexact = match.exact
854 mexact = match.exact
849 dirignore = self._dirignore
855 dirignore = self._dirignore
850 checkexec = self._checkexec
856 checkexec = self._checkexec
851 copymap = self._copymap
857 copymap = self._copymap
852 lastnormaltime = self._lastnormaltime
858 lastnormaltime = self._lastnormaltime
853
859
854 # We need to do full walks when either
860 # We need to do full walks when either
855 # - we're listing all clean files, or
861 # - we're listing all clean files, or
856 # - match.traversedir does something, because match.traversedir should
862 # - match.traversedir does something, because match.traversedir should
857 # be called for every dir in the working dir
863 # be called for every dir in the working dir
858 full = listclean or match.traversedir is not None
864 full = listclean or match.traversedir is not None
859 for fn, st in self.walk(match, subrepos, listunknown, listignored,
865 for fn, st in self.walk(match, subrepos, listunknown, listignored,
860 full=full).iteritems():
866 full=full).iteritems():
861 if fn not in dmap:
867 if fn not in dmap:
862 if (listignored or mexact(fn)) and dirignore(fn):
868 if (listignored or mexact(fn)) and dirignore(fn):
863 if listignored:
869 if listignored:
864 iadd(fn)
870 iadd(fn)
865 else:
871 else:
866 uadd(fn)
872 uadd(fn)
867 continue
873 continue
868
874
869 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
875 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
870 # written like that for performance reasons. dmap[fn] is not a
876 # written like that for performance reasons. dmap[fn] is not a
871 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
877 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
872 # opcode has fast paths when the value to be unpacked is a tuple or
878 # opcode has fast paths when the value to be unpacked is a tuple or
873 # a list, but falls back to creating a full-fledged iterator in
879 # a list, but falls back to creating a full-fledged iterator in
874 # general. That is much slower than simply accessing and storing the
880 # general. That is much slower than simply accessing and storing the
875 # tuple members one by one.
881 # tuple members one by one.
876 t = dmap[fn]
882 t = dmap[fn]
877 state = t[0]
883 state = t[0]
878 mode = t[1]
884 mode = t[1]
879 size = t[2]
885 size = t[2]
880 time = t[3]
886 time = t[3]
881
887
882 if not st and state in "nma":
888 if not st and state in "nma":
883 dadd(fn)
889 dadd(fn)
884 elif state == 'n':
890 elif state == 'n':
885 mtime = int(st.st_mtime)
891 mtime = int(st.st_mtime)
886 if (size >= 0 and
892 if (size >= 0 and
887 ((size != st.st_size and size != st.st_size & _rangemask)
893 ((size != st.st_size and size != st.st_size & _rangemask)
888 or ((mode ^ st.st_mode) & 0100 and checkexec))
894 or ((mode ^ st.st_mode) & 0100 and checkexec))
889 or size == -2 # other parent
895 or size == -2 # other parent
890 or fn in copymap):
896 or fn in copymap):
891 madd(fn)
897 madd(fn)
892 elif time != mtime and time != mtime & _rangemask:
898 elif time != mtime and time != mtime & _rangemask:
893 ladd(fn)
899 ladd(fn)
894 elif mtime == lastnormaltime:
900 elif mtime == lastnormaltime:
895 # fn may have been changed in the same timeslot without
901 # fn may have been changed in the same timeslot without
896 # changing its size. This can happen if we quickly do
902 # changing its size. This can happen if we quickly do
897 # multiple commits in a single transaction.
903 # multiple commits in a single transaction.
898 # Force lookup, so we don't miss such a racy file change.
904 # Force lookup, so we don't miss such a racy file change.
899 ladd(fn)
905 ladd(fn)
900 elif listclean:
906 elif listclean:
901 cadd(fn)
907 cadd(fn)
902 elif state == 'm':
908 elif state == 'm':
903 madd(fn)
909 madd(fn)
904 elif state == 'a':
910 elif state == 'a':
905 aadd(fn)
911 aadd(fn)
906 elif state == 'r':
912 elif state == 'r':
907 radd(fn)
913 radd(fn)
908
914
909 return (lookup, modified, added, removed, deleted, unknown, ignored,
915 return (lookup, modified, added, removed, deleted, unknown, ignored,
910 clean)
916 clean)
911
917
912 def matches(self, match):
918 def matches(self, match):
913 '''
919 '''
914 return files in the dirstate (in whatever state) filtered by match
920 return files in the dirstate (in whatever state) filtered by match
915 '''
921 '''
916 dmap = self._map
922 dmap = self._map
917 if match.always():
923 if match.always():
918 return dmap.keys()
924 return dmap.keys()
919 files = match.files()
925 files = match.files()
920 if match.matchfn == match.exact:
926 if match.matchfn == match.exact:
921 # fast path -- filter the other way around, since typically files is
927 # fast path -- filter the other way around, since typically files is
922 # much smaller than dmap
928 # much smaller than dmap
923 return [f for f in files if f in dmap]
929 return [f for f in files if f in dmap]
924 if not match.anypats() and util.all(fn in dmap for fn in files):
930 if not match.anypats() and util.all(fn in dmap for fn in files):
925 # fast path -- all the values are known to be files, so just return
931 # fast path -- all the values are known to be files, so just return
926 # that
932 # that
927 return list(files)
933 return list(files)
928 return [f for f in dmap if match(f)]
934 return [f for f in dmap if match(f)]
@@ -1,149 +1,144 b''
1 This test makes sure that we don't mark a file as merged with its ancestor
1 This test makes sure that we don't mark a file as merged with its ancestor
2 when we do a merge.
2 when we do a merge.
3
3
4 $ cat <<EOF > merge
4 $ cat <<EOF > merge
5 > import sys, os
5 > import sys, os
6 > print "merging for", os.path.basename(sys.argv[1])
6 > print "merging for", os.path.basename(sys.argv[1])
7 > EOF
7 > EOF
8 $ HGMERGE="python ../merge"; export HGMERGE
8 $ HGMERGE="python ../merge"; export HGMERGE
9
9
10 Creating base:
10 Creating base:
11
11
12 $ hg init a
12 $ hg init a
13 $ cd a
13 $ cd a
14 $ echo 1 > foo
14 $ echo 1 > foo
15 $ echo 1 > bar
15 $ echo 1 > bar
16 $ echo 1 > baz
16 $ echo 1 > baz
17 $ echo 1 > quux
17 $ echo 1 > quux
18 $ hg add foo bar baz quux
18 $ hg add foo bar baz quux
19 $ hg commit -m "base"
19 $ hg commit -m "base"
20
20
21 $ cd ..
21 $ cd ..
22 $ hg clone a b
22 $ hg clone a b
23 updating to branch default
23 updating to branch default
24 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
25
25
26 Creating branch a:
26 Creating branch a:
27
27
28 $ cd a
28 $ cd a
29 $ echo 2a > foo
29 $ echo 2a > foo
30 $ echo 2a > bar
30 $ echo 2a > bar
31 $ hg commit -m "branch a"
31 $ hg commit -m "branch a"
32
32
33 Creating branch b:
33 Creating branch b:
34
34
35 $ cd ..
35 $ cd ..
36 $ cd b
36 $ cd b
37 $ echo 2b > foo
37 $ echo 2b > foo
38 $ echo 2b > baz
38 $ echo 2b > baz
39 $ hg commit -m "branch b"
39 $ hg commit -m "branch b"
40
40
41 We shouldn't have anything but n state here:
41 We shouldn't have anything but n state here:
42
42
43 $ hg debugstate --nodates | grep -v "^n"
43 $ hg debugstate --nodates | grep -v "^n"
44 [1]
44 [1]
45
45
46 Merging:
46 Merging:
47
47
48 $ hg pull ../a
48 $ hg pull ../a
49 pulling from ../a
49 pulling from ../a
50 searching for changes
50 searching for changes
51 adding changesets
51 adding changesets
52 adding manifests
52 adding manifests
53 adding file changes
53 adding file changes
54 added 1 changesets with 2 changes to 2 files (+1 heads)
54 added 1 changesets with 2 changes to 2 files (+1 heads)
55 (run 'hg heads' to see heads, 'hg merge' to merge)
55 (run 'hg heads' to see heads, 'hg merge' to merge)
56
56
57 $ hg merge -v
57 $ hg merge -v
58 resolving manifests
58 resolving manifests
59 getting bar
59 getting bar
60 merging foo
60 merging foo
61 merging for foo
61 merging for foo
62 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
62 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
63 (branch merge, don't forget to commit)
63 (branch merge, don't forget to commit)
64
64
65 $ echo 2m > foo
65 $ echo 2m > foo
66 $ echo 2b > baz
66 $ echo 2b > baz
67 $ echo new > quux
67 $ echo new > quux
68
68
69 We shouldn't have anything but foo in merge state here:
70
71 $ hg debugstate --nodates | grep "^m"
72 m 644 3 foo
73
74 $ hg ci -m "merge"
69 $ hg ci -m "merge"
75
70
76 main: we should have a merge here:
71 main: we should have a merge here:
77
72
78 $ hg debugindex --changelog
73 $ hg debugindex --changelog
79 rev offset length ..... linkrev nodeid p1 p2 (re)
74 rev offset length ..... linkrev nodeid p1 p2 (re)
80 0 0 73 ..... 0 cdca01651b96 000000000000 000000000000 (re)
75 0 0 73 ..... 0 cdca01651b96 000000000000 000000000000 (re)
81 1 73 68 ..... 1 f6718a9cb7f3 cdca01651b96 000000000000 (re)
76 1 73 68 ..... 1 f6718a9cb7f3 cdca01651b96 000000000000 (re)
82 2 141 68 ..... 2 bdd988058d16 cdca01651b96 000000000000 (re)
77 2 141 68 ..... 2 bdd988058d16 cdca01651b96 000000000000 (re)
83 3 209 66 ..... 3 d8a521142a3c f6718a9cb7f3 bdd988058d16 (re)
78 3 209 66 ..... 3 d8a521142a3c f6718a9cb7f3 bdd988058d16 (re)
84
79
85 log should show foo and quux changed:
80 log should show foo and quux changed:
86
81
87 $ hg log -v -r tip
82 $ hg log -v -r tip
88 changeset: 3:d8a521142a3c
83 changeset: 3:d8a521142a3c
89 tag: tip
84 tag: tip
90 parent: 1:f6718a9cb7f3
85 parent: 1:f6718a9cb7f3
91 parent: 2:bdd988058d16
86 parent: 2:bdd988058d16
92 user: test
87 user: test
93 date: Thu Jan 01 00:00:00 1970 +0000
88 date: Thu Jan 01 00:00:00 1970 +0000
94 files: foo quux
89 files: foo quux
95 description:
90 description:
96 merge
91 merge
97
92
98
93
99
94
100 foo: we should have a merge here:
95 foo: we should have a merge here:
101
96
102 $ hg debugindex foo
97 $ hg debugindex foo
103 rev offset length ..... linkrev nodeid p1 p2 (re)
98 rev offset length ..... linkrev nodeid p1 p2 (re)
104 0 0 3 ..... 0 b8e02f643373 000000000000 000000000000 (re)
99 0 0 3 ..... 0 b8e02f643373 000000000000 000000000000 (re)
105 1 3 4 ..... 1 2ffeddde1b65 b8e02f643373 000000000000 (re)
100 1 3 4 ..... 1 2ffeddde1b65 b8e02f643373 000000000000 (re)
106 2 7 4 ..... 2 33d1fb69067a b8e02f643373 000000000000 (re)
101 2 7 4 ..... 2 33d1fb69067a b8e02f643373 000000000000 (re)
107 3 11 4 ..... 3 aa27919ee430 2ffeddde1b65 33d1fb69067a (re)
102 3 11 4 ..... 3 aa27919ee430 2ffeddde1b65 33d1fb69067a (re)
108
103
109 bar: we should not have a merge here:
104 bar: we should not have a merge here:
110
105
111 $ hg debugindex bar
106 $ hg debugindex bar
112 rev offset length ..... linkrev nodeid p1 p2 (re)
107 rev offset length ..... linkrev nodeid p1 p2 (re)
113 0 0 3 ..... 0 b8e02f643373 000000000000 000000000000 (re)
108 0 0 3 ..... 0 b8e02f643373 000000000000 000000000000 (re)
114 1 3 4 ..... 2 33d1fb69067a b8e02f643373 000000000000 (re)
109 1 3 4 ..... 2 33d1fb69067a b8e02f643373 000000000000 (re)
115
110
116 baz: we should not have a merge here:
111 baz: we should not have a merge here:
117
112
118 $ hg debugindex baz
113 $ hg debugindex baz
119 rev offset length ..... linkrev nodeid p1 p2 (re)
114 rev offset length ..... linkrev nodeid p1 p2 (re)
120 0 0 3 ..... 0 b8e02f643373 000000000000 000000000000 (re)
115 0 0 3 ..... 0 b8e02f643373 000000000000 000000000000 (re)
121 1 3 4 ..... 1 2ffeddde1b65 b8e02f643373 000000000000 (re)
116 1 3 4 ..... 1 2ffeddde1b65 b8e02f643373 000000000000 (re)
122
117
123 quux: we should not have a merge here:
118 quux: we should not have a merge here:
124
119
125 $ hg debugindex quux
120 $ hg debugindex quux
126 rev offset length ..... linkrev nodeid p1 p2 (re)
121 rev offset length ..... linkrev nodeid p1 p2 (re)
127 0 0 3 ..... 0 b8e02f643373 000000000000 000000000000 (re)
122 0 0 3 ..... 0 b8e02f643373 000000000000 000000000000 (re)
128 1 3 5 ..... 3 6128c0f33108 b8e02f643373 000000000000 (re)
123 1 3 5 ..... 3 6128c0f33108 b8e02f643373 000000000000 (re)
129
124
130 Manifest entries should match tips of all files:
125 Manifest entries should match tips of all files:
131
126
132 $ hg manifest --debug
127 $ hg manifest --debug
133 33d1fb69067a0139622a3fa3b7ba1cdb1367972e 644 bar
128 33d1fb69067a0139622a3fa3b7ba1cdb1367972e 644 bar
134 2ffeddde1b65b4827f6746174a145474129fa2ce 644 baz
129 2ffeddde1b65b4827f6746174a145474129fa2ce 644 baz
135 aa27919ee4303cfd575e1fb932dd64d75aa08be4 644 foo
130 aa27919ee4303cfd575e1fb932dd64d75aa08be4 644 foo
136 6128c0f33108e8cfbb4e0824d13ae48b466d7280 644 quux
131 6128c0f33108e8cfbb4e0824d13ae48b466d7280 644 quux
137
132
138 Everything should be clean now:
133 Everything should be clean now:
139
134
140 $ hg status
135 $ hg status
141
136
142 $ hg verify
137 $ hg verify
143 checking changesets
138 checking changesets
144 checking manifests
139 checking manifests
145 crosschecking files in changesets and manifests
140 crosschecking files in changesets and manifests
146 checking files
141 checking files
147 4 files, 4 changesets, 10 total revisions
142 4 files, 4 changesets, 10 total revisions
148
143
149 $ cd ..
144 $ cd ..
@@ -1,56 +1,56 b''
1 http://mercurial.selenic.com/bts/issue522
1 http://mercurial.selenic.com/bts/issue522
2
2
3 In the merge below, the file "foo" has the same contents in both
3 In the merge below, the file "foo" has the same contents in both
4 parents, but if we look at the file-level history, we'll notice that
4 parents, but if we look at the file-level history, we'll notice that
5 the version in p1 is an ancestor of the version in p2. This test makes
5 the version in p1 is an ancestor of the version in p2. This test makes
6 sure that we'll use the version from p2 in the manifest of the merge
6 sure that we'll use the version from p2 in the manifest of the merge
7 revision.
7 revision.
8
8
9 $ hg init
9 $ hg init
10
10
11 $ echo foo > foo
11 $ echo foo > foo
12 $ hg ci -qAm 'add foo'
12 $ hg ci -qAm 'add foo'
13
13
14 $ echo bar >> foo
14 $ echo bar >> foo
15 $ hg ci -m 'change foo'
15 $ hg ci -m 'change foo'
16
16
17 $ hg backout -r tip -m 'backout changed foo'
17 $ hg backout -r tip -m 'backout changed foo'
18 reverting foo
18 reverting foo
19 changeset 2:4d9e78aaceee backs out changeset 1:b515023e500e
19 changeset 2:4d9e78aaceee backs out changeset 1:b515023e500e
20
20
21 $ hg up -C 0
21 $ hg up -C 0
22 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
22 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23
23
24 $ touch bar
24 $ touch bar
25 $ hg ci -qAm 'add bar'
25 $ hg ci -qAm 'add bar'
26
26
27 $ hg merge --debug
27 $ hg merge --debug
28 searching for copies back to rev 1
28 searching for copies back to rev 1
29 unmatched files in local:
29 unmatched files in local:
30 bar
30 bar
31 resolving manifests
31 resolving manifests
32 branchmerge: True, force: False, partial: False
32 branchmerge: True, force: False, partial: False
33 ancestor: bbd179dfa0a7, local: 71766447bdbb+, remote: 4d9e78aaceee
33 ancestor: bbd179dfa0a7, local: 71766447bdbb+, remote: 4d9e78aaceee
34 foo: remote is newer -> g
34 foo: remote is newer -> g
35 getting foo
35 getting foo
36 updating: foo 1/1 files (100.00%)
36 updating: foo 1/1 files (100.00%)
37 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
37 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
38 (branch merge, don't forget to commit)
38 (branch merge, don't forget to commit)
39
39
40 $ hg debugstate | grep foo
40 $ hg debugstate | grep foo
41 n 0 -2 unset foo
41 m 0 -2 unset foo
42
42
43 $ hg st -A foo
43 $ hg st -A foo
44 M foo
44 M foo
45
45
46 $ hg ci -m 'merge'
46 $ hg ci -m 'merge'
47
47
48 $ hg manifest --debug | grep foo
48 $ hg manifest --debug | grep foo
49 c6fc755d7e68f49f880599da29f15add41f42f5a 644 foo
49 c6fc755d7e68f49f880599da29f15add41f42f5a 644 foo
50
50
51 $ hg debugindex foo
51 $ hg debugindex foo
52 rev offset length ..... linkrev nodeid p1 p2 (re)
52 rev offset length ..... linkrev nodeid p1 p2 (re)
53 0 0 5 ..... 0 2ed2a3912a0b 000000000000 000000000000 (re)
53 0 0 5 ..... 0 2ed2a3912a0b 000000000000 000000000000 (re)
54 1 5 9 ..... 1 6f4310b00b9a 2ed2a3912a0b 000000000000 (re)
54 1 5 9 ..... 1 6f4310b00b9a 2ed2a3912a0b 000000000000 (re)
55 2 14 5 ..... 2 c6fc755d7e68 6f4310b00b9a 000000000000 (re)
55 2 14 5 ..... 2 c6fc755d7e68 6f4310b00b9a 000000000000 (re)
56
56
@@ -1,114 +1,114 b''
1 $ hg init
1 $ hg init
2
2
3 $ echo foo > foo
3 $ echo foo > foo
4 $ echo bar > bar
4 $ echo bar > bar
5 $ hg ci -qAm 'add foo bar'
5 $ hg ci -qAm 'add foo bar'
6
6
7 $ echo foo2 >> foo
7 $ echo foo2 >> foo
8 $ echo bleh > bar
8 $ echo bleh > bar
9 $ hg ci -m 'change foo bar'
9 $ hg ci -m 'change foo bar'
10
10
11 $ hg up -qC 0
11 $ hg up -qC 0
12 $ hg mv foo foo1
12 $ hg mv foo foo1
13 $ echo foo1 > foo1
13 $ echo foo1 > foo1
14 $ hg cat foo >> foo1
14 $ hg cat foo >> foo1
15 $ hg ci -m 'mv foo foo1'
15 $ hg ci -m 'mv foo foo1'
16 created new head
16 created new head
17
17
18 $ hg merge
18 $ hg merge
19 merging foo1 and foo to foo1
19 merging foo1 and foo to foo1
20 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
20 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
21 (branch merge, don't forget to commit)
21 (branch merge, don't forget to commit)
22
22
23 $ hg debugstate --nodates
23 $ hg debugstate --nodates
24 n 0 -2 bar
24 m 0 -2 bar
25 m 644 14 foo1
25 m 644 14 foo1
26 copy: foo -> foo1
26 copy: foo -> foo1
27
27
28 $ hg st -q
28 $ hg st -q
29 M bar
29 M bar
30 M foo1
30 M foo1
31
31
32
32
33 Removing foo1 and bar:
33 Removing foo1 and bar:
34
34
35 $ cp foo1 F
35 $ cp foo1 F
36 $ cp bar B
36 $ cp bar B
37 $ hg rm -f foo1 bar
37 $ hg rm -f foo1 bar
38
38
39 $ hg debugstate --nodates
39 $ hg debugstate --nodates
40 r 0 -2 bar
40 r 0 -1 bar
41 r 0 -1 foo1
41 r 0 -1 foo1
42 copy: foo -> foo1
42 copy: foo -> foo1
43
43
44 $ hg st -qC
44 $ hg st -qC
45 R bar
45 R bar
46 R foo1
46 R foo1
47
47
48
48
49 Re-adding foo1 and bar:
49 Re-adding foo1 and bar:
50
50
51 $ cp F foo1
51 $ cp F foo1
52 $ cp B bar
52 $ cp B bar
53 $ hg add -v foo1 bar
53 $ hg add -v foo1 bar
54 adding bar
54 adding bar
55 adding foo1
55 adding foo1
56
56
57 $ hg debugstate --nodates
57 $ hg debugstate --nodates
58 n 0 -2 bar
58 m 644 5 bar
59 m 644 14 foo1
59 m 644 14 foo1
60 copy: foo -> foo1
60 copy: foo -> foo1
61
61
62 $ hg st -qC
62 $ hg st -qC
63 M bar
63 M bar
64 M foo1
64 M foo1
65 foo
65 foo
66
66
67
67
68 Reverting foo1 and bar:
68 Reverting foo1 and bar:
69
69
70 $ hg revert -vr . foo1 bar
70 $ hg revert -vr . foo1 bar
71 saving current version of bar as bar.orig
71 saving current version of bar as bar.orig
72 reverting bar
72 reverting bar
73 saving current version of foo1 as foo1.orig
73 saving current version of foo1 as foo1.orig
74 reverting foo1
74 reverting foo1
75
75
76 $ hg debugstate --nodates
76 $ hg debugstate --nodates
77 n 0 -2 bar
77 m 644 5 bar
78 m 644 14 foo1
78 m 644 14 foo1
79 copy: foo -> foo1
79 copy: foo -> foo1
80
80
81 $ hg st -qC
81 $ hg st -qC
82 M bar
82 M bar
83 M foo1
83 M foo1
84 foo
84 foo
85
85
86 $ hg diff
86 $ hg diff
87
87
88 Merge should not overwrite local file that is untracked after remove
88 Merge should not overwrite local file that is untracked after remove
89
89
90 $ rm *
90 $ rm *
91 $ hg up -qC
91 $ hg up -qC
92 $ hg rm bar
92 $ hg rm bar
93 $ hg ci -m 'remove bar'
93 $ hg ci -m 'remove bar'
94 $ echo 'memories of buried pirate treasure' > bar
94 $ echo 'memories of buried pirate treasure' > bar
95 $ hg merge
95 $ hg merge
96 bar: untracked file differs
96 bar: untracked file differs
97 abort: untracked files in working directory differ from files in requested revision
97 abort: untracked files in working directory differ from files in requested revision
98 [255]
98 [255]
99 $ cat bar
99 $ cat bar
100 memories of buried pirate treasure
100 memories of buried pirate treasure
101
101
102 Those who use force will lose
102 Those who use force will lose
103
103
104 $ hg merge -f
104 $ hg merge -f
105 remote changed bar which local deleted
105 remote changed bar which local deleted
106 use (c)hanged version or leave (d)eleted? c
106 use (c)hanged version or leave (d)eleted? c
107 merging foo1 and foo to foo1
107 merging foo1 and foo to foo1
108 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
108 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
109 (branch merge, don't forget to commit)
109 (branch merge, don't forget to commit)
110 $ cat bar
110 $ cat bar
111 bleh
111 bleh
112 $ hg st
112 $ hg st
113 M bar
113 M bar
114 M foo1
114 M foo1
General Comments 0
You need to be logged in to leave comments. Login now