dirstate: delay writing out to ensure the timestamp of each entry explicitly...
FUJIWARA Katsunori
r21931:89b809fa stable
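The change in write() below reads a new integer option, debug.dirstate.delaywrite, and sleeps that many seconds before the dirstate file is created, so that 'now' (the mtime of the freshly written dirstate) is strictly newer than the mtime of files touched just beforehand and pack_dirstate does not have to drop their timestamps. As a minimal illustration of how the option could be set (the value 2 is only an example, and the 'debug' section suggests this is a testing knob rather than something for normal use), any command that rewrites the dirstate would then honor the delay:

    hg --config debug.dirstate.delaywrite=2 commit -m "example"

or, equivalently, in a repository's .hg/hgrc:

    [debug]
    dirstate.delaywrite = 2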
@@ -1,867 +1,875 @@
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid
8 from node import nullid
9 from i18n import _
9 from i18n import _
10 import scmutil, util, ignore, osutil, parsers, encoding, pathutil
10 import scmutil, util, ignore, osutil, parsers, encoding, pathutil
11 import os, stat, errno, gc
11 import os, stat, errno, gc
12
12
13 propertycache = util.propertycache
13 propertycache = util.propertycache
14 filecache = scmutil.filecache
14 filecache = scmutil.filecache
15 _rangemask = 0x7fffffff
15 _rangemask = 0x7fffffff
16
16
17 dirstatetuple = parsers.dirstatetuple
17 dirstatetuple = parsers.dirstatetuple
18
18
19 class repocache(filecache):
19 class repocache(filecache):
20 """filecache for files in .hg/"""
20 """filecache for files in .hg/"""
21 def join(self, obj, fname):
21 def join(self, obj, fname):
22 return obj._opener.join(fname)
22 return obj._opener.join(fname)
23
23
24 class rootcache(filecache):
24 class rootcache(filecache):
25 """filecache for files in the repository root"""
25 """filecache for files in the repository root"""
26 def join(self, obj, fname):
26 def join(self, obj, fname):
27 return obj._join(fname)
27 return obj._join(fname)
28
28
29 class dirstate(object):
29 class dirstate(object):
30
30
31 def __init__(self, opener, ui, root, validate):
31 def __init__(self, opener, ui, root, validate):
32 '''Create a new dirstate object.
32 '''Create a new dirstate object.
33
33
34 opener is an open()-like callable that can be used to open the
34 opener is an open()-like callable that can be used to open the
35 dirstate file; root is the root of the directory tracked by
35 dirstate file; root is the root of the directory tracked by
36 the dirstate.
36 the dirstate.
37 '''
37 '''
38 self._opener = opener
38 self._opener = opener
39 self._validate = validate
39 self._validate = validate
40 self._root = root
40 self._root = root
41 self._rootdir = os.path.join(root, '')
41 self._rootdir = os.path.join(root, '')
42 self._dirty = False
42 self._dirty = False
43 self._dirtypl = False
43 self._dirtypl = False
44 self._lastnormaltime = 0
44 self._lastnormaltime = 0
45 self._ui = ui
45 self._ui = ui
46 self._filecache = {}
46 self._filecache = {}
47
47
48 @propertycache
48 @propertycache
49 def _map(self):
49 def _map(self):
50 '''Return the dirstate contents as a map from filename to
50 '''Return the dirstate contents as a map from filename to
51 (state, mode, size, time).'''
51 (state, mode, size, time).'''
52 self._read()
52 self._read()
53 return self._map
53 return self._map
54
54
55 @propertycache
55 @propertycache
56 def _copymap(self):
56 def _copymap(self):
57 self._read()
57 self._read()
58 return self._copymap
58 return self._copymap
59
59
60 @propertycache
60 @propertycache
61 def _foldmap(self):
61 def _foldmap(self):
62 f = {}
62 f = {}
63 for name, s in self._map.iteritems():
63 for name, s in self._map.iteritems():
64 if s[0] != 'r':
64 if s[0] != 'r':
65 f[util.normcase(name)] = name
65 f[util.normcase(name)] = name
66 for name in self._dirs:
66 for name in self._dirs:
67 f[util.normcase(name)] = name
67 f[util.normcase(name)] = name
68 f['.'] = '.' # prevents useless util.fspath() invocation
68 f['.'] = '.' # prevents useless util.fspath() invocation
69 return f
69 return f
70
70
71 @repocache('branch')
71 @repocache('branch')
72 def _branch(self):
72 def _branch(self):
73 try:
73 try:
74 return self._opener.read("branch").strip() or "default"
74 return self._opener.read("branch").strip() or "default"
75 except IOError, inst:
75 except IOError, inst:
76 if inst.errno != errno.ENOENT:
76 if inst.errno != errno.ENOENT:
77 raise
77 raise
78 return "default"
78 return "default"
79
79
80 @propertycache
80 @propertycache
81 def _pl(self):
81 def _pl(self):
82 try:
82 try:
83 fp = self._opener("dirstate")
83 fp = self._opener("dirstate")
84 st = fp.read(40)
84 st = fp.read(40)
85 fp.close()
85 fp.close()
86 l = len(st)
86 l = len(st)
87 if l == 40:
87 if l == 40:
88 return st[:20], st[20:40]
88 return st[:20], st[20:40]
89 elif l > 0 and l < 40:
89 elif l > 0 and l < 40:
90 raise util.Abort(_('working directory state appears damaged!'))
90 raise util.Abort(_('working directory state appears damaged!'))
91 except IOError, err:
91 except IOError, err:
92 if err.errno != errno.ENOENT:
92 if err.errno != errno.ENOENT:
93 raise
93 raise
94 return [nullid, nullid]
94 return [nullid, nullid]
95
95
96 @propertycache
96 @propertycache
97 def _dirs(self):
97 def _dirs(self):
98 return scmutil.dirs(self._map, 'r')
98 return scmutil.dirs(self._map, 'r')
99
99
100 def dirs(self):
100 def dirs(self):
101 return self._dirs
101 return self._dirs
102
102
103 @rootcache('.hgignore')
103 @rootcache('.hgignore')
104 def _ignore(self):
104 def _ignore(self):
105 files = [self._join('.hgignore')]
105 files = [self._join('.hgignore')]
106 for name, path in self._ui.configitems("ui"):
106 for name, path in self._ui.configitems("ui"):
107 if name == 'ignore' or name.startswith('ignore.'):
107 if name == 'ignore' or name.startswith('ignore.'):
108 files.append(util.expandpath(path))
108 files.append(util.expandpath(path))
109 return ignore.ignore(self._root, files, self._ui.warn)
109 return ignore.ignore(self._root, files, self._ui.warn)
110
110
111 @propertycache
111 @propertycache
112 def _slash(self):
112 def _slash(self):
113 return self._ui.configbool('ui', 'slash') and os.sep != '/'
113 return self._ui.configbool('ui', 'slash') and os.sep != '/'
114
114
115 @propertycache
115 @propertycache
116 def _checklink(self):
116 def _checklink(self):
117 return util.checklink(self._root)
117 return util.checklink(self._root)
118
118
119 @propertycache
119 @propertycache
120 def _checkexec(self):
120 def _checkexec(self):
121 return util.checkexec(self._root)
121 return util.checkexec(self._root)
122
122
123 @propertycache
123 @propertycache
124 def _checkcase(self):
124 def _checkcase(self):
125 return not util.checkcase(self._join('.hg'))
125 return not util.checkcase(self._join('.hg'))
126
126
127 def _join(self, f):
127 def _join(self, f):
128 # much faster than os.path.join()
128 # much faster than os.path.join()
129 # it's safe because f is always a relative path
129 # it's safe because f is always a relative path
130 return self._rootdir + f
130 return self._rootdir + f
131
131
132 def flagfunc(self, buildfallback):
132 def flagfunc(self, buildfallback):
133 if self._checklink and self._checkexec:
133 if self._checklink and self._checkexec:
134 def f(x):
134 def f(x):
135 try:
135 try:
136 st = os.lstat(self._join(x))
136 st = os.lstat(self._join(x))
137 if util.statislink(st):
137 if util.statislink(st):
138 return 'l'
138 return 'l'
139 if util.statisexec(st):
139 if util.statisexec(st):
140 return 'x'
140 return 'x'
141 except OSError:
141 except OSError:
142 pass
142 pass
143 return ''
143 return ''
144 return f
144 return f
145
145
146 fallback = buildfallback()
146 fallback = buildfallback()
147 if self._checklink:
147 if self._checklink:
148 def f(x):
148 def f(x):
149 if os.path.islink(self._join(x)):
149 if os.path.islink(self._join(x)):
150 return 'l'
150 return 'l'
151 if 'x' in fallback(x):
151 if 'x' in fallback(x):
152 return 'x'
152 return 'x'
153 return ''
153 return ''
154 return f
154 return f
155 if self._checkexec:
155 if self._checkexec:
156 def f(x):
156 def f(x):
157 if 'l' in fallback(x):
157 if 'l' in fallback(x):
158 return 'l'
158 return 'l'
159 if util.isexec(self._join(x)):
159 if util.isexec(self._join(x)):
160 return 'x'
160 return 'x'
161 return ''
161 return ''
162 return f
162 return f
163 else:
163 else:
164 return fallback
164 return fallback
165
165
166 @propertycache
166 @propertycache
167 def _cwd(self):
167 def _cwd(self):
168 return os.getcwd()
168 return os.getcwd()
169
169
170 def getcwd(self):
170 def getcwd(self):
171 cwd = self._cwd
171 cwd = self._cwd
172 if cwd == self._root:
172 if cwd == self._root:
173 return ''
173 return ''
174 # self._root ends with a path separator if self._root is '/' or 'C:\'
174 # self._root ends with a path separator if self._root is '/' or 'C:\'
175 rootsep = self._root
175 rootsep = self._root
176 if not util.endswithsep(rootsep):
176 if not util.endswithsep(rootsep):
177 rootsep += os.sep
177 rootsep += os.sep
178 if cwd.startswith(rootsep):
178 if cwd.startswith(rootsep):
179 return cwd[len(rootsep):]
179 return cwd[len(rootsep):]
180 else:
180 else:
181 # we're outside the repo. return an absolute path.
181 # we're outside the repo. return an absolute path.
182 return cwd
182 return cwd
183
183
184 def pathto(self, f, cwd=None):
184 def pathto(self, f, cwd=None):
185 if cwd is None:
185 if cwd is None:
186 cwd = self.getcwd()
186 cwd = self.getcwd()
187 path = util.pathto(self._root, cwd, f)
187 path = util.pathto(self._root, cwd, f)
188 if self._slash:
188 if self._slash:
189 return util.pconvert(path)
189 return util.pconvert(path)
190 return path
190 return path
191
191
192 def __getitem__(self, key):
192 def __getitem__(self, key):
193 '''Return the current state of key (a filename) in the dirstate.
193 '''Return the current state of key (a filename) in the dirstate.
194
194
195 States are:
195 States are:
196 n normal
196 n normal
197 m needs merging
197 m needs merging
198 r marked for removal
198 r marked for removal
199 a marked for addition
199 a marked for addition
200 ? not tracked
200 ? not tracked
201 '''
201 '''
202 return self._map.get(key, ("?",))[0]
202 return self._map.get(key, ("?",))[0]
203
203
204 def __contains__(self, key):
204 def __contains__(self, key):
205 return key in self._map
205 return key in self._map
206
206
207 def __iter__(self):
207 def __iter__(self):
208 for x in sorted(self._map):
208 for x in sorted(self._map):
209 yield x
209 yield x
210
210
211 def iteritems(self):
211 def iteritems(self):
212 return self._map.iteritems()
212 return self._map.iteritems()
213
213
214 def parents(self):
214 def parents(self):
215 return [self._validate(p) for p in self._pl]
215 return [self._validate(p) for p in self._pl]
216
216
217 def p1(self):
217 def p1(self):
218 return self._validate(self._pl[0])
218 return self._validate(self._pl[0])
219
219
220 def p2(self):
220 def p2(self):
221 return self._validate(self._pl[1])
221 return self._validate(self._pl[1])
222
222
223 def branch(self):
223 def branch(self):
224 return encoding.tolocal(self._branch)
224 return encoding.tolocal(self._branch)
225
225
226 def setparents(self, p1, p2=nullid):
226 def setparents(self, p1, p2=nullid):
227 """Set dirstate parents to p1 and p2.
227 """Set dirstate parents to p1 and p2.
228
228
229 When moving from two parents to one, 'm' merged entries a
229 When moving from two parents to one, 'm' merged entries a
230 adjusted to normal and previous copy records discarded and
230 adjusted to normal and previous copy records discarded and
231 returned by the call.
231 returned by the call.
232
232
233 See localrepo.setparents()
233 See localrepo.setparents()
234 """
234 """
235 self._dirty = self._dirtypl = True
235 self._dirty = self._dirtypl = True
236 oldp2 = self._pl[1]
236 oldp2 = self._pl[1]
237 self._pl = p1, p2
237 self._pl = p1, p2
238 copies = {}
238 copies = {}
239 if oldp2 != nullid and p2 == nullid:
239 if oldp2 != nullid and p2 == nullid:
240 # Discard 'm' markers when moving away from a merge state
240 # Discard 'm' markers when moving away from a merge state
241 for f, s in self._map.iteritems():
241 for f, s in self._map.iteritems():
242 if s[0] == 'm':
242 if s[0] == 'm':
243 if f in self._copymap:
243 if f in self._copymap:
244 copies[f] = self._copymap[f]
244 copies[f] = self._copymap[f]
245 self.normallookup(f)
245 self.normallookup(f)
246 return copies
246 return copies
247
247
248 def setbranch(self, branch):
248 def setbranch(self, branch):
249 self._branch = encoding.fromlocal(branch)
249 self._branch = encoding.fromlocal(branch)
250 f = self._opener('branch', 'w', atomictemp=True)
250 f = self._opener('branch', 'w', atomictemp=True)
251 try:
251 try:
252 f.write(self._branch + '\n')
252 f.write(self._branch + '\n')
253 f.close()
253 f.close()
254
254
255 # make sure filecache has the correct stat info for _branch after
255 # make sure filecache has the correct stat info for _branch after
256 # replacing the underlying file
256 # replacing the underlying file
257 ce = self._filecache['_branch']
257 ce = self._filecache['_branch']
258 if ce:
258 if ce:
259 ce.refresh()
259 ce.refresh()
260 except: # re-raises
260 except: # re-raises
261 f.discard()
261 f.discard()
262 raise
262 raise
263
263
264 def _read(self):
264 def _read(self):
265 self._map = {}
265 self._map = {}
266 self._copymap = {}
266 self._copymap = {}
267 try:
267 try:
268 st = self._opener.read("dirstate")
268 st = self._opener.read("dirstate")
269 except IOError, err:
269 except IOError, err:
270 if err.errno != errno.ENOENT:
270 if err.errno != errno.ENOENT:
271 raise
271 raise
272 return
272 return
273 if not st:
273 if not st:
274 return
274 return
275
275
276 # Python's garbage collector triggers a GC each time a certain number
276 # Python's garbage collector triggers a GC each time a certain number
277 # of container objects (the number being defined by
277 # of container objects (the number being defined by
278 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
278 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
279 # for each file in the dirstate. The C version then immediately marks
279 # for each file in the dirstate. The C version then immediately marks
280 # them as not to be tracked by the collector. However, this has no
280 # them as not to be tracked by the collector. However, this has no
281 # effect on when GCs are triggered, only on what objects the GC looks
281 # effect on when GCs are triggered, only on what objects the GC looks
282 # into. This means that O(number of files) GCs are unavoidable.
282 # into. This means that O(number of files) GCs are unavoidable.
283 # Depending on when in the process's lifetime the dirstate is parsed,
283 # Depending on when in the process's lifetime the dirstate is parsed,
284 # this can get very expensive. As a workaround, disable GC while
284 # this can get very expensive. As a workaround, disable GC while
285 # parsing the dirstate.
285 # parsing the dirstate.
286 gcenabled = gc.isenabled()
286 gcenabled = gc.isenabled()
287 gc.disable()
287 gc.disable()
288 try:
288 try:
289 p = parsers.parse_dirstate(self._map, self._copymap, st)
289 p = parsers.parse_dirstate(self._map, self._copymap, st)
290 finally:
290 finally:
291 if gcenabled:
291 if gcenabled:
292 gc.enable()
292 gc.enable()
293 if not self._dirtypl:
293 if not self._dirtypl:
294 self._pl = p
294 self._pl = p
295
295
296 def invalidate(self):
296 def invalidate(self):
297 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
297 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
298 "_ignore"):
298 "_ignore"):
299 if a in self.__dict__:
299 if a in self.__dict__:
300 delattr(self, a)
300 delattr(self, a)
301 self._lastnormaltime = 0
301 self._lastnormaltime = 0
302 self._dirty = False
302 self._dirty = False
303
303
304 def copy(self, source, dest):
304 def copy(self, source, dest):
305 """Mark dest as a copy of source. Unmark dest if source is None."""
305 """Mark dest as a copy of source. Unmark dest if source is None."""
306 if source == dest:
306 if source == dest:
307 return
307 return
308 self._dirty = True
308 self._dirty = True
309 if source is not None:
309 if source is not None:
310 self._copymap[dest] = source
310 self._copymap[dest] = source
311 elif dest in self._copymap:
311 elif dest in self._copymap:
312 del self._copymap[dest]
312 del self._copymap[dest]
313
313
314 def copied(self, file):
314 def copied(self, file):
315 return self._copymap.get(file, None)
315 return self._copymap.get(file, None)
316
316
317 def copies(self):
317 def copies(self):
318 return self._copymap
318 return self._copymap
319
319
320 def _droppath(self, f):
320 def _droppath(self, f):
321 if self[f] not in "?r" and "_dirs" in self.__dict__:
321 if self[f] not in "?r" and "_dirs" in self.__dict__:
322 self._dirs.delpath(f)
322 self._dirs.delpath(f)
323
323
324 def _addpath(self, f, state, mode, size, mtime):
324 def _addpath(self, f, state, mode, size, mtime):
325 oldstate = self[f]
325 oldstate = self[f]
326 if state == 'a' or oldstate == 'r':
326 if state == 'a' or oldstate == 'r':
327 scmutil.checkfilename(f)
327 scmutil.checkfilename(f)
328 if f in self._dirs:
328 if f in self._dirs:
329 raise util.Abort(_('directory %r already in dirstate') % f)
329 raise util.Abort(_('directory %r already in dirstate') % f)
330 # shadows
330 # shadows
331 for d in scmutil.finddirs(f):
331 for d in scmutil.finddirs(f):
332 if d in self._dirs:
332 if d in self._dirs:
333 break
333 break
334 if d in self._map and self[d] != 'r':
334 if d in self._map and self[d] != 'r':
335 raise util.Abort(
335 raise util.Abort(
336 _('file %r in dirstate clashes with %r') % (d, f))
336 _('file %r in dirstate clashes with %r') % (d, f))
337 if oldstate in "?r" and "_dirs" in self.__dict__:
337 if oldstate in "?r" and "_dirs" in self.__dict__:
338 self._dirs.addpath(f)
338 self._dirs.addpath(f)
339 self._dirty = True
339 self._dirty = True
340 self._map[f] = dirstatetuple(state, mode, size, mtime)
340 self._map[f] = dirstatetuple(state, mode, size, mtime)
341
341
342 def normal(self, f):
342 def normal(self, f):
343 '''Mark a file normal and clean.'''
343 '''Mark a file normal and clean.'''
344 s = os.lstat(self._join(f))
344 s = os.lstat(self._join(f))
345 mtime = int(s.st_mtime)
345 mtime = int(s.st_mtime)
346 self._addpath(f, 'n', s.st_mode,
346 self._addpath(f, 'n', s.st_mode,
347 s.st_size & _rangemask, mtime & _rangemask)
347 s.st_size & _rangemask, mtime & _rangemask)
348 if f in self._copymap:
348 if f in self._copymap:
349 del self._copymap[f]
349 del self._copymap[f]
350 if mtime > self._lastnormaltime:
350 if mtime > self._lastnormaltime:
351 # Remember the most recent modification timeslot for status(),
351 # Remember the most recent modification timeslot for status(),
352 # to make sure we won't miss future size-preserving file content
352 # to make sure we won't miss future size-preserving file content
353 # modifications that happen within the same timeslot.
353 # modifications that happen within the same timeslot.
354 self._lastnormaltime = mtime
354 self._lastnormaltime = mtime
355
355
356 def normallookup(self, f):
356 def normallookup(self, f):
357 '''Mark a file normal, but possibly dirty.'''
357 '''Mark a file normal, but possibly dirty.'''
358 if self._pl[1] != nullid and f in self._map:
358 if self._pl[1] != nullid and f in self._map:
359 # if there is a merge going on and the file was either
359 # if there is a merge going on and the file was either
360 # in state 'm' (-1) or coming from other parent (-2) before
360 # in state 'm' (-1) or coming from other parent (-2) before
361 # being removed, restore that state.
361 # being removed, restore that state.
362 entry = self._map[f]
362 entry = self._map[f]
363 if entry[0] == 'r' and entry[2] in (-1, -2):
363 if entry[0] == 'r' and entry[2] in (-1, -2):
364 source = self._copymap.get(f)
364 source = self._copymap.get(f)
365 if entry[2] == -1:
365 if entry[2] == -1:
366 self.merge(f)
366 self.merge(f)
367 elif entry[2] == -2:
367 elif entry[2] == -2:
368 self.otherparent(f)
368 self.otherparent(f)
369 if source:
369 if source:
370 self.copy(source, f)
370 self.copy(source, f)
371 return
371 return
372 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
372 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
373 return
373 return
374 self._addpath(f, 'n', 0, -1, -1)
374 self._addpath(f, 'n', 0, -1, -1)
375 if f in self._copymap:
375 if f in self._copymap:
376 del self._copymap[f]
376 del self._copymap[f]
377
377
378 def otherparent(self, f):
378 def otherparent(self, f):
379 '''Mark as coming from the other parent, always dirty.'''
379 '''Mark as coming from the other parent, always dirty.'''
380 if self._pl[1] == nullid:
380 if self._pl[1] == nullid:
381 raise util.Abort(_("setting %r to other parent "
381 raise util.Abort(_("setting %r to other parent "
382 "only allowed in merges") % f)
382 "only allowed in merges") % f)
383 self._addpath(f, 'n', 0, -2, -1)
383 self._addpath(f, 'n', 0, -2, -1)
384 if f in self._copymap:
384 if f in self._copymap:
385 del self._copymap[f]
385 del self._copymap[f]
386
386
387 def add(self, f):
387 def add(self, f):
388 '''Mark a file added.'''
388 '''Mark a file added.'''
389 self._addpath(f, 'a', 0, -1, -1)
389 self._addpath(f, 'a', 0, -1, -1)
390 if f in self._copymap:
390 if f in self._copymap:
391 del self._copymap[f]
391 del self._copymap[f]
392
392
393 def remove(self, f):
393 def remove(self, f):
394 '''Mark a file removed.'''
394 '''Mark a file removed.'''
395 self._dirty = True
395 self._dirty = True
396 self._droppath(f)
396 self._droppath(f)
397 size = 0
397 size = 0
398 if self._pl[1] != nullid and f in self._map:
398 if self._pl[1] != nullid and f in self._map:
399 # backup the previous state
399 # backup the previous state
400 entry = self._map[f]
400 entry = self._map[f]
401 if entry[0] == 'm': # merge
401 if entry[0] == 'm': # merge
402 size = -1
402 size = -1
403 elif entry[0] == 'n' and entry[2] == -2: # other parent
403 elif entry[0] == 'n' and entry[2] == -2: # other parent
404 size = -2
404 size = -2
405 self._map[f] = dirstatetuple('r', 0, size, 0)
405 self._map[f] = dirstatetuple('r', 0, size, 0)
406 if size == 0 and f in self._copymap:
406 if size == 0 and f in self._copymap:
407 del self._copymap[f]
407 del self._copymap[f]
408
408
409 def merge(self, f):
409 def merge(self, f):
410 '''Mark a file merged.'''
410 '''Mark a file merged.'''
411 if self._pl[1] == nullid:
411 if self._pl[1] == nullid:
412 return self.normallookup(f)
412 return self.normallookup(f)
413 s = os.lstat(self._join(f))
413 s = os.lstat(self._join(f))
414 self._addpath(f, 'm', s.st_mode,
414 self._addpath(f, 'm', s.st_mode,
415 s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
415 s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
416 if f in self._copymap:
416 if f in self._copymap:
417 del self._copymap[f]
417 del self._copymap[f]
418
418
419 def drop(self, f):
419 def drop(self, f):
420 '''Drop a file from the dirstate'''
420 '''Drop a file from the dirstate'''
421 if f in self._map:
421 if f in self._map:
422 self._dirty = True
422 self._dirty = True
423 self._droppath(f)
423 self._droppath(f)
424 del self._map[f]
424 del self._map[f]
425
425
426 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
426 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
427 normed = util.normcase(path)
427 normed = util.normcase(path)
428 folded = self._foldmap.get(normed, None)
428 folded = self._foldmap.get(normed, None)
429 if folded is None:
429 if folded is None:
430 if isknown:
430 if isknown:
431 folded = path
431 folded = path
432 else:
432 else:
433 if exists is None:
433 if exists is None:
434 exists = os.path.lexists(os.path.join(self._root, path))
434 exists = os.path.lexists(os.path.join(self._root, path))
435 if not exists:
435 if not exists:
436 # Maybe a path component exists
436 # Maybe a path component exists
437 if not ignoremissing and '/' in path:
437 if not ignoremissing and '/' in path:
438 d, f = path.rsplit('/', 1)
438 d, f = path.rsplit('/', 1)
439 d = self._normalize(d, isknown, ignoremissing, None)
439 d = self._normalize(d, isknown, ignoremissing, None)
440 folded = d + "/" + f
440 folded = d + "/" + f
441 else:
441 else:
442 # No path components, preserve original case
442 # No path components, preserve original case
443 folded = path
443 folded = path
444 else:
444 else:
445 # recursively normalize leading directory components
445 # recursively normalize leading directory components
446 # against dirstate
446 # against dirstate
447 if '/' in normed:
447 if '/' in normed:
448 d, f = normed.rsplit('/', 1)
448 d, f = normed.rsplit('/', 1)
449 d = self._normalize(d, isknown, ignoremissing, True)
449 d = self._normalize(d, isknown, ignoremissing, True)
450 r = self._root + "/" + d
450 r = self._root + "/" + d
451 folded = d + "/" + util.fspath(f, r)
451 folded = d + "/" + util.fspath(f, r)
452 else:
452 else:
453 folded = util.fspath(normed, self._root)
453 folded = util.fspath(normed, self._root)
454 self._foldmap[normed] = folded
454 self._foldmap[normed] = folded
455
455
456 return folded
456 return folded
457
457
458 def normalize(self, path, isknown=False, ignoremissing=False):
458 def normalize(self, path, isknown=False, ignoremissing=False):
459 '''
459 '''
460 normalize the case of a pathname when on a casefolding filesystem
460 normalize the case of a pathname when on a casefolding filesystem
461
461
462 isknown specifies whether the filename came from walking the
462 isknown specifies whether the filename came from walking the
463 disk, to avoid extra filesystem access.
463 disk, to avoid extra filesystem access.
464
464
465 If ignoremissing is True, missing path are returned
465 If ignoremissing is True, missing path are returned
466 unchanged. Otherwise, we try harder to normalize possibly
466 unchanged. Otherwise, we try harder to normalize possibly
467 existing path components.
467 existing path components.
468
468
469 The normalized case is determined based on the following precedence:
469 The normalized case is determined based on the following precedence:
470
470
471 - version of name already stored in the dirstate
471 - version of name already stored in the dirstate
472 - version of name stored on disk
472 - version of name stored on disk
473 - version provided via command arguments
473 - version provided via command arguments
474 '''
474 '''
475
475
476 if self._checkcase:
476 if self._checkcase:
477 return self._normalize(path, isknown, ignoremissing)
477 return self._normalize(path, isknown, ignoremissing)
478 return path
478 return path
479
479
480 def clear(self):
480 def clear(self):
481 self._map = {}
481 self._map = {}
482 if "_dirs" in self.__dict__:
482 if "_dirs" in self.__dict__:
483 delattr(self, "_dirs")
483 delattr(self, "_dirs")
484 self._copymap = {}
484 self._copymap = {}
485 self._pl = [nullid, nullid]
485 self._pl = [nullid, nullid]
486 self._lastnormaltime = 0
486 self._lastnormaltime = 0
487 self._dirty = True
487 self._dirty = True
488
488
489 def rebuild(self, parent, allfiles, changedfiles=None):
489 def rebuild(self, parent, allfiles, changedfiles=None):
490 changedfiles = changedfiles or allfiles
490 changedfiles = changedfiles or allfiles
491 oldmap = self._map
491 oldmap = self._map
492 self.clear()
492 self.clear()
493 for f in allfiles:
493 for f in allfiles:
494 if f not in changedfiles:
494 if f not in changedfiles:
495 self._map[f] = oldmap[f]
495 self._map[f] = oldmap[f]
496 else:
496 else:
497 if 'x' in allfiles.flags(f):
497 if 'x' in allfiles.flags(f):
498 self._map[f] = dirstatetuple('n', 0777, -1, 0)
498 self._map[f] = dirstatetuple('n', 0777, -1, 0)
499 else:
499 else:
500 self._map[f] = dirstatetuple('n', 0666, -1, 0)
500 self._map[f] = dirstatetuple('n', 0666, -1, 0)
501 self._pl = (parent, nullid)
501 self._pl = (parent, nullid)
502 self._dirty = True
502 self._dirty = True
503
503
504 def write(self):
504 def write(self):
505 if not self._dirty:
505 if not self._dirty:
506 return
506 return
507
508 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
509 # timestamp of each entries in dirstate, because of 'now > mtime'
510 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
511 if delaywrite:
512 import time # to avoid useless import
513 time.sleep(delaywrite)
514
507 st = self._opener("dirstate", "w", atomictemp=True)
515 st = self._opener("dirstate", "w", atomictemp=True)
508 # use the modification time of the newly created temporary file as the
516 # use the modification time of the newly created temporary file as the
509 # filesystem's notion of 'now'
517 # filesystem's notion of 'now'
510 now = util.fstat(st).st_mtime
518 now = util.fstat(st).st_mtime
511 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
519 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
512 st.close()
520 st.close()
513 self._lastnormaltime = 0
521 self._lastnormaltime = 0
514 self._dirty = self._dirtypl = False
522 self._dirty = self._dirtypl = False
515
523
516 def _dirignore(self, f):
524 def _dirignore(self, f):
517 if f == '.':
525 if f == '.':
518 return False
526 return False
519 if self._ignore(f):
527 if self._ignore(f):
520 return True
528 return True
521 for p in scmutil.finddirs(f):
529 for p in scmutil.finddirs(f):
522 if self._ignore(p):
530 if self._ignore(p):
523 return True
531 return True
524 return False
532 return False
525
533
526 def _walkexplicit(self, match, subrepos):
534 def _walkexplicit(self, match, subrepos):
527 '''Get stat data about the files explicitly specified by match.
535 '''Get stat data about the files explicitly specified by match.
528
536
529 Return a triple (results, dirsfound, dirsnotfound).
537 Return a triple (results, dirsfound, dirsnotfound).
530 - results is a mapping from filename to stat result. It also contains
538 - results is a mapping from filename to stat result. It also contains
531 listings mapping subrepos and .hg to None.
539 listings mapping subrepos and .hg to None.
532 - dirsfound is a list of files found to be directories.
540 - dirsfound is a list of files found to be directories.
533 - dirsnotfound is a list of files that the dirstate thinks are
541 - dirsnotfound is a list of files that the dirstate thinks are
534 directories and that were not found.'''
542 directories and that were not found.'''
535
543
536 def badtype(mode):
544 def badtype(mode):
537 kind = _('unknown')
545 kind = _('unknown')
538 if stat.S_ISCHR(mode):
546 if stat.S_ISCHR(mode):
539 kind = _('character device')
547 kind = _('character device')
540 elif stat.S_ISBLK(mode):
548 elif stat.S_ISBLK(mode):
541 kind = _('block device')
549 kind = _('block device')
542 elif stat.S_ISFIFO(mode):
550 elif stat.S_ISFIFO(mode):
543 kind = _('fifo')
551 kind = _('fifo')
544 elif stat.S_ISSOCK(mode):
552 elif stat.S_ISSOCK(mode):
545 kind = _('socket')
553 kind = _('socket')
546 elif stat.S_ISDIR(mode):
554 elif stat.S_ISDIR(mode):
547 kind = _('directory')
555 kind = _('directory')
548 return _('unsupported file type (type is %s)') % kind
556 return _('unsupported file type (type is %s)') % kind
549
557
550 matchedir = match.explicitdir
558 matchedir = match.explicitdir
551 badfn = match.bad
559 badfn = match.bad
552 dmap = self._map
560 dmap = self._map
553 normpath = util.normpath
561 normpath = util.normpath
554 lstat = os.lstat
562 lstat = os.lstat
555 getkind = stat.S_IFMT
563 getkind = stat.S_IFMT
556 dirkind = stat.S_IFDIR
564 dirkind = stat.S_IFDIR
557 regkind = stat.S_IFREG
565 regkind = stat.S_IFREG
558 lnkkind = stat.S_IFLNK
566 lnkkind = stat.S_IFLNK
559 join = self._join
567 join = self._join
560 dirsfound = []
568 dirsfound = []
561 foundadd = dirsfound.append
569 foundadd = dirsfound.append
562 dirsnotfound = []
570 dirsnotfound = []
563 notfoundadd = dirsnotfound.append
571 notfoundadd = dirsnotfound.append
564
572
565 if match.matchfn != match.exact and self._checkcase:
573 if match.matchfn != match.exact and self._checkcase:
566 normalize = self._normalize
574 normalize = self._normalize
567 else:
575 else:
568 normalize = None
576 normalize = None
569
577
570 files = sorted(match.files())
578 files = sorted(match.files())
571 subrepos.sort()
579 subrepos.sort()
572 i, j = 0, 0
580 i, j = 0, 0
573 while i < len(files) and j < len(subrepos):
581 while i < len(files) and j < len(subrepos):
574 subpath = subrepos[j] + "/"
582 subpath = subrepos[j] + "/"
575 if files[i] < subpath:
583 if files[i] < subpath:
576 i += 1
584 i += 1
577 continue
585 continue
578 while i < len(files) and files[i].startswith(subpath):
586 while i < len(files) and files[i].startswith(subpath):
579 del files[i]
587 del files[i]
580 j += 1
588 j += 1
581
589
582 if not files or '.' in files:
590 if not files or '.' in files:
583 files = ['']
591 files = ['']
584 results = dict.fromkeys(subrepos)
592 results = dict.fromkeys(subrepos)
585 results['.hg'] = None
593 results['.hg'] = None
586
594
587 for ff in files:
595 for ff in files:
588 if normalize:
596 if normalize:
589 nf = normalize(normpath(ff), False, True)
597 nf = normalize(normpath(ff), False, True)
590 else:
598 else:
591 nf = normpath(ff)
599 nf = normpath(ff)
592 if nf in results:
600 if nf in results:
593 continue
601 continue
594
602
595 try:
603 try:
596 st = lstat(join(nf))
604 st = lstat(join(nf))
597 kind = getkind(st.st_mode)
605 kind = getkind(st.st_mode)
598 if kind == dirkind:
606 if kind == dirkind:
599 if nf in dmap:
607 if nf in dmap:
600 # file replaced by dir on disk but still in dirstate
608 # file replaced by dir on disk but still in dirstate
601 results[nf] = None
609 results[nf] = None
602 if matchedir:
610 if matchedir:
603 matchedir(nf)
611 matchedir(nf)
604 foundadd(nf)
612 foundadd(nf)
605 elif kind == regkind or kind == lnkkind:
613 elif kind == regkind or kind == lnkkind:
606 results[nf] = st
614 results[nf] = st
607 else:
615 else:
608 badfn(ff, badtype(kind))
616 badfn(ff, badtype(kind))
609 if nf in dmap:
617 if nf in dmap:
610 results[nf] = None
618 results[nf] = None
611 except OSError, inst: # nf not found on disk - it is dirstate only
619 except OSError, inst: # nf not found on disk - it is dirstate only
612 if nf in dmap: # does it exactly match a missing file?
620 if nf in dmap: # does it exactly match a missing file?
613 results[nf] = None
621 results[nf] = None
614 else: # does it match a missing directory?
622 else: # does it match a missing directory?
615 prefix = nf + "/"
623 prefix = nf + "/"
616 for fn in dmap:
624 for fn in dmap:
617 if fn.startswith(prefix):
625 if fn.startswith(prefix):
618 if matchedir:
626 if matchedir:
619 matchedir(nf)
627 matchedir(nf)
620 notfoundadd(nf)
628 notfoundadd(nf)
621 break
629 break
622 else:
630 else:
623 badfn(ff, inst.strerror)
631 badfn(ff, inst.strerror)
624
632
625 return results, dirsfound, dirsnotfound
633 return results, dirsfound, dirsnotfound
626
634
627 def walk(self, match, subrepos, unknown, ignored, full=True):
635 def walk(self, match, subrepos, unknown, ignored, full=True):
628 '''
636 '''
629 Walk recursively through the directory tree, finding all files
637 Walk recursively through the directory tree, finding all files
630 matched by match.
638 matched by match.
631
639
632 If full is False, maybe skip some known-clean files.
640 If full is False, maybe skip some known-clean files.
633
641
634 Return a dict mapping filename to stat-like object (either
642 Return a dict mapping filename to stat-like object (either
635 mercurial.osutil.stat instance or return value of os.stat()).
643 mercurial.osutil.stat instance or return value of os.stat()).
636
644
637 '''
645 '''
638 # full is a flag that extensions that hook into walk can use -- this
646 # full is a flag that extensions that hook into walk can use -- this
639 # implementation doesn't use it at all. This satisfies the contract
647 # implementation doesn't use it at all. This satisfies the contract
640 # because we only guarantee a "maybe".
648 # because we only guarantee a "maybe".
641
649
642 if ignored:
650 if ignored:
643 ignore = util.never
651 ignore = util.never
644 dirignore = util.never
652 dirignore = util.never
645 elif unknown:
653 elif unknown:
646 ignore = self._ignore
654 ignore = self._ignore
647 dirignore = self._dirignore
655 dirignore = self._dirignore
648 else:
656 else:
649 # if not unknown and not ignored, drop dir recursion and step 2
657 # if not unknown and not ignored, drop dir recursion and step 2
650 ignore = util.always
658 ignore = util.always
651 dirignore = util.always
659 dirignore = util.always
652
660
653 matchfn = match.matchfn
661 matchfn = match.matchfn
654 matchalways = match.always()
662 matchalways = match.always()
655 matchtdir = match.traversedir
663 matchtdir = match.traversedir
656 dmap = self._map
664 dmap = self._map
657 listdir = osutil.listdir
665 listdir = osutil.listdir
658 lstat = os.lstat
666 lstat = os.lstat
659 dirkind = stat.S_IFDIR
667 dirkind = stat.S_IFDIR
660 regkind = stat.S_IFREG
668 regkind = stat.S_IFREG
661 lnkkind = stat.S_IFLNK
669 lnkkind = stat.S_IFLNK
662 join = self._join
670 join = self._join
663
671
664 exact = skipstep3 = False
672 exact = skipstep3 = False
665 if matchfn == match.exact: # match.exact
673 if matchfn == match.exact: # match.exact
666 exact = True
674 exact = True
667 dirignore = util.always # skip step 2
675 dirignore = util.always # skip step 2
668 elif match.files() and not match.anypats(): # match.match, no patterns
676 elif match.files() and not match.anypats(): # match.match, no patterns
669 skipstep3 = True
677 skipstep3 = True
670
678
671 if not exact and self._checkcase:
679 if not exact and self._checkcase:
672 normalize = self._normalize
680 normalize = self._normalize
673 skipstep3 = False
681 skipstep3 = False
674 else:
682 else:
675 normalize = None
683 normalize = None
676
684
677 # step 1: find all explicit files
685 # step 1: find all explicit files
678 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
686 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
679
687
680 skipstep3 = skipstep3 and not (work or dirsnotfound)
688 skipstep3 = skipstep3 and not (work or dirsnotfound)
681 work = [d for d in work if not dirignore(d)]
689 work = [d for d in work if not dirignore(d)]
682 wadd = work.append
690 wadd = work.append
683
691
684 # step 2: visit subdirectories
692 # step 2: visit subdirectories
685 while work:
693 while work:
686 nd = work.pop()
694 nd = work.pop()
687 skip = None
695 skip = None
688 if nd == '.':
696 if nd == '.':
689 nd = ''
697 nd = ''
690 else:
698 else:
691 skip = '.hg'
699 skip = '.hg'
692 try:
700 try:
693 entries = listdir(join(nd), stat=True, skip=skip)
701 entries = listdir(join(nd), stat=True, skip=skip)
694 except OSError, inst:
702 except OSError, inst:
695 if inst.errno in (errno.EACCES, errno.ENOENT):
703 if inst.errno in (errno.EACCES, errno.ENOENT):
696 match.bad(self.pathto(nd), inst.strerror)
704 match.bad(self.pathto(nd), inst.strerror)
697 continue
705 continue
698 raise
706 raise
699 for f, kind, st in entries:
707 for f, kind, st in entries:
700 if normalize:
708 if normalize:
701 nf = normalize(nd and (nd + "/" + f) or f, True, True)
709 nf = normalize(nd and (nd + "/" + f) or f, True, True)
702 else:
710 else:
703 nf = nd and (nd + "/" + f) or f
711 nf = nd and (nd + "/" + f) or f
704 if nf not in results:
712 if nf not in results:
705 if kind == dirkind:
713 if kind == dirkind:
706 if not ignore(nf):
714 if not ignore(nf):
707 if matchtdir:
715 if matchtdir:
708 matchtdir(nf)
716 matchtdir(nf)
709 wadd(nf)
717 wadd(nf)
710 if nf in dmap and (matchalways or matchfn(nf)):
718 if nf in dmap and (matchalways or matchfn(nf)):
711 results[nf] = None
719 results[nf] = None
712 elif kind == regkind or kind == lnkkind:
720 elif kind == regkind or kind == lnkkind:
713 if nf in dmap:
721 if nf in dmap:
714 if matchalways or matchfn(nf):
722 if matchalways or matchfn(nf):
715 results[nf] = st
723 results[nf] = st
716 elif (matchalways or matchfn(nf)) and not ignore(nf):
724 elif (matchalways or matchfn(nf)) and not ignore(nf):
717 results[nf] = st
725 results[nf] = st
718 elif nf in dmap and (matchalways or matchfn(nf)):
726 elif nf in dmap and (matchalways or matchfn(nf)):
719 results[nf] = None
727 results[nf] = None
720
728
721 for s in subrepos:
729 for s in subrepos:
722 del results[s]
730 del results[s]
723 del results['.hg']
731 del results['.hg']
724
732
725 # step 3: visit remaining files from dmap
733 # step 3: visit remaining files from dmap
726 if not skipstep3 and not exact:
734 if not skipstep3 and not exact:
727 # If a dmap file is not in results yet, it was either
735 # If a dmap file is not in results yet, it was either
728 # a) not matching matchfn b) ignored, c) missing, or d) under a
736 # a) not matching matchfn b) ignored, c) missing, or d) under a
729 # symlink directory.
737 # symlink directory.
730 if not results and matchalways:
738 if not results and matchalways:
731 visit = dmap.keys()
739 visit = dmap.keys()
732 else:
740 else:
733 visit = [f for f in dmap if f not in results and matchfn(f)]
741 visit = [f for f in dmap if f not in results and matchfn(f)]
734 visit.sort()
742 visit.sort()
735
743
736 if unknown:
744 if unknown:
737 # unknown == True means we walked all dirs under the roots
745 # unknown == True means we walked all dirs under the roots
738 # that wasn't ignored, and everything that matched was stat'ed
746 # that wasn't ignored, and everything that matched was stat'ed
739 # and is already in results.
747 # and is already in results.
740 # The rest must thus be ignored or under a symlink.
748 # The rest must thus be ignored or under a symlink.
741 audit_path = pathutil.pathauditor(self._root)
749 audit_path = pathutil.pathauditor(self._root)
742
750
743 for nf in iter(visit):
751 for nf in iter(visit):
744 # Report ignored items in the dmap as long as they are not
752 # Report ignored items in the dmap as long as they are not
745 # under a symlink directory.
753 # under a symlink directory.
746 if audit_path.check(nf):
754 if audit_path.check(nf):
747 try:
755 try:
748 results[nf] = lstat(join(nf))
756 results[nf] = lstat(join(nf))
749 # file was just ignored, no links, and exists
757 # file was just ignored, no links, and exists
750 except OSError:
758 except OSError:
751 # file doesn't exist
759 # file doesn't exist
752 results[nf] = None
760 results[nf] = None
753 else:
761 else:
754 # It's either missing or under a symlink directory
762 # It's either missing or under a symlink directory
755 # which we in this case report as missing
763 # which we in this case report as missing
756 results[nf] = None
764 results[nf] = None
757 else:
765 else:
758 # We may not have walked the full directory tree above,
766 # We may not have walked the full directory tree above,
759 # so stat and check everything we missed.
767 # so stat and check everything we missed.
760 nf = iter(visit).next
768 nf = iter(visit).next
761 for st in util.statfiles([join(i) for i in visit]):
769 for st in util.statfiles([join(i) for i in visit]):
762 results[nf()] = st
770 results[nf()] = st
763 return results
771 return results
764
772
765 def status(self, match, subrepos, ignored, clean, unknown):
773 def status(self, match, subrepos, ignored, clean, unknown):
766 '''Determine the status of the working copy relative to the
774 '''Determine the status of the working copy relative to the
767 dirstate and return a tuple of lists (unsure, modified, added,
775 dirstate and return a tuple of lists (unsure, modified, added,
768 removed, deleted, unknown, ignored, clean), where:
776 removed, deleted, unknown, ignored, clean), where:
769
777
770 unsure:
778 unsure:
771 files that might have been modified since the dirstate was
779 files that might have been modified since the dirstate was
772 written, but need to be read to be sure (size is the same
780 written, but need to be read to be sure (size is the same
773 but mtime differs)
781 but mtime differs)
774 modified:
782 modified:
775 files that have definitely been modified since the dirstate
783 files that have definitely been modified since the dirstate
776 was written (different size or mode)
784 was written (different size or mode)
777 added:
785 added:
778 files that have been explicitly added with hg add
786 files that have been explicitly added with hg add
779 removed:
787 removed:
780 files that have been explicitly removed with hg remove
788 files that have been explicitly removed with hg remove
781 deleted:
789 deleted:
782 files that have been deleted through other means ("missing")
790 files that have been deleted through other means ("missing")
783 unknown:
791 unknown:
784 files not in the dirstate that are not ignored
792 files not in the dirstate that are not ignored
785 ignored:
793 ignored:
786 files not in the dirstate that are ignored
794 files not in the dirstate that are ignored
787 (by _dirignore())
795 (by _dirignore())
788 clean:
796 clean:
789 files that have definitely not been modified since the
797 files that have definitely not been modified since the
790 dirstate was written
798 dirstate was written
791 '''
799 '''
792 listignored, listclean, listunknown = ignored, clean, unknown
800 listignored, listclean, listunknown = ignored, clean, unknown
793 lookup, modified, added, unknown, ignored = [], [], [], [], []
801 lookup, modified, added, unknown, ignored = [], [], [], [], []
794 removed, deleted, clean = [], [], []
802 removed, deleted, clean = [], [], []
795
803
796 dmap = self._map
804 dmap = self._map
797 ladd = lookup.append # aka "unsure"
805 ladd = lookup.append # aka "unsure"
798 madd = modified.append
806 madd = modified.append
799 aadd = added.append
807 aadd = added.append
800 uadd = unknown.append
808 uadd = unknown.append
801 iadd = ignored.append
809 iadd = ignored.append
802 radd = removed.append
810 radd = removed.append
803 dadd = deleted.append
811 dadd = deleted.append
804 cadd = clean.append
812 cadd = clean.append
805 mexact = match.exact
813 mexact = match.exact
806 dirignore = self._dirignore
814 dirignore = self._dirignore
807 checkexec = self._checkexec
815 checkexec = self._checkexec
808 copymap = self._copymap
816 copymap = self._copymap
809 lastnormaltime = self._lastnormaltime
817 lastnormaltime = self._lastnormaltime
810
818
811 # We need to do full walks when either
819 # We need to do full walks when either
812 # - we're listing all clean files, or
820 # - we're listing all clean files, or
813 # - match.traversedir does something, because match.traversedir should
821 # - match.traversedir does something, because match.traversedir should
814 # be called for every dir in the working dir
822 # be called for every dir in the working dir
815 full = listclean or match.traversedir is not None
823 full = listclean or match.traversedir is not None
816 for fn, st in self.walk(match, subrepos, listunknown, listignored,
824 for fn, st in self.walk(match, subrepos, listunknown, listignored,
817 full=full).iteritems():
825 full=full).iteritems():
818 if fn not in dmap:
826 if fn not in dmap:
819 if (listignored or mexact(fn)) and dirignore(fn):
827 if (listignored or mexact(fn)) and dirignore(fn):
820 if listignored:
828 if listignored:
821 iadd(fn)
829 iadd(fn)
822 else:
830 else:
823 uadd(fn)
831 uadd(fn)
824 continue
832 continue
825
833
826 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
834 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
827 # written like that for performance reasons. dmap[fn] is not a
835 # written like that for performance reasons. dmap[fn] is not a
828 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
836 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
829 # opcode has fast paths when the value to be unpacked is a tuple or
837 # opcode has fast paths when the value to be unpacked is a tuple or
830 # a list, but falls back to creating a full-fledged iterator in
838 # a list, but falls back to creating a full-fledged iterator in
831 # general. That is much slower than simply accessing and storing the
839 # general. That is much slower than simply accessing and storing the
832 # tuple members one by one.
840 # tuple members one by one.
833 t = dmap[fn]
841 t = dmap[fn]
834 state = t[0]
842 state = t[0]
835 mode = t[1]
843 mode = t[1]
836 size = t[2]
844 size = t[2]
837 time = t[3]
845 time = t[3]
838
846
839 if not st and state in "nma":
847 if not st and state in "nma":
840 dadd(fn)
848 dadd(fn)
841 elif state == 'n':
849 elif state == 'n':
842 mtime = int(st.st_mtime)
850 mtime = int(st.st_mtime)
843 if (size >= 0 and
851 if (size >= 0 and
844 ((size != st.st_size and size != st.st_size & _rangemask)
852 ((size != st.st_size and size != st.st_size & _rangemask)
845 or ((mode ^ st.st_mode) & 0100 and checkexec))
853 or ((mode ^ st.st_mode) & 0100 and checkexec))
846 or size == -2 # other parent
854 or size == -2 # other parent
847 or fn in copymap):
855 or fn in copymap):
848 madd(fn)
856 madd(fn)
849 elif time != mtime and time != mtime & _rangemask:
857 elif time != mtime and time != mtime & _rangemask:
850 ladd(fn)
858 ladd(fn)
851 elif mtime == lastnormaltime:
859 elif mtime == lastnormaltime:
852 # fn may have been changed in the same timeslot without
860 # fn may have been changed in the same timeslot without
853 # changing its size. This can happen if we quickly do
861 # changing its size. This can happen if we quickly do
854 # multiple commits in a single transaction.
862 # multiple commits in a single transaction.
855 # Force lookup, so we don't miss such a racy file change.
863 # Force lookup, so we don't miss such a racy file change.
856 ladd(fn)
864 ladd(fn)
857 elif listclean:
865 elif listclean:
858 cadd(fn)
866 cadd(fn)
859 elif state == 'm':
867 elif state == 'm':
860 madd(fn)
868 madd(fn)
861 elif state == 'a':
869 elif state == 'a':
862 aadd(fn)
870 aadd(fn)
863 elif state == 'r':
871 elif state == 'r':
864 radd(fn)
872 radd(fn)
865
873
866 return (lookup, modified, added, removed, deleted, unknown, ignored,
874 return (lookup, modified, added, removed, deleted, unknown, ignored,
867 clean)
875 clean)
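For background, the following is a minimal, self-contained sketch (plain Python, not Mercurial code; the file contents are hypothetical) of the ambiguity that motivates the delay: a same-size modification made within the same whole-second timeslot as the recorded mtime cannot be told apart by comparing (size, int(mtime)), which is why pack_dirstate must either drop such timestamps or the write must be delayed past the timeslot.

import os
import tempfile

# create a file and record its (size, truncated mtime) pair, the way a
# dirstate remembers a clean 'normal' entry
fd = tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False)
path = fd.name
fd.write('aa')
fd.close()
recorded = os.stat(path)
recorded_size, recorded_mtime = recorded.st_size, int(recorded.st_mtime)

# modify the file with same-sized content within the same second
f = open(path, 'w')
f.write('bb')
f.close()

st = os.stat(path)
ambiguous = (st.st_size == recorded_size and
             int(st.st_mtime) == recorded_mtime)
print('change is invisible to a (size, mtime) comparison: %s' % ambiguous)
os.unlink(path)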