dirstate: use a presized dict for the dirstate...
Siddharth Agarwal
r25585:868b7ee8 default
@@ -1,993 +1,1006 @@
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid
8 from node import nullid
9 from i18n import _
9 from i18n import _
10 import scmutil, util, osutil, parsers, encoding, pathutil
10 import scmutil, util, osutil, parsers, encoding, pathutil
11 import os, stat, errno
11 import os, stat, errno
12 import match as matchmod
12 import match as matchmod
13
13
14 propertycache = util.propertycache
14 propertycache = util.propertycache
15 filecache = scmutil.filecache
15 filecache = scmutil.filecache
16 _rangemask = 0x7fffffff
16 _rangemask = 0x7fffffff
17
17
18 dirstatetuple = parsers.dirstatetuple
18 dirstatetuple = parsers.dirstatetuple
19
19
20 class repocache(filecache):
20 class repocache(filecache):
21 """filecache for files in .hg/"""
21 """filecache for files in .hg/"""
22 def join(self, obj, fname):
22 def join(self, obj, fname):
23 return obj._opener.join(fname)
23 return obj._opener.join(fname)
24
24
25 class rootcache(filecache):
25 class rootcache(filecache):
26 """filecache for files in the repository root"""
26 """filecache for files in the repository root"""
27 def join(self, obj, fname):
27 def join(self, obj, fname):
28 return obj._join(fname)
28 return obj._join(fname)
29
29
30 class dirstate(object):
30 class dirstate(object):
31
31
32 def __init__(self, opener, ui, root, validate):
32 def __init__(self, opener, ui, root, validate):
33 '''Create a new dirstate object.
33 '''Create a new dirstate object.
34
34
35 opener is an open()-like callable that can be used to open the
35 opener is an open()-like callable that can be used to open the
36 dirstate file; root is the root of the directory tracked by
36 dirstate file; root is the root of the directory tracked by
37 the dirstate.
37 the dirstate.
38 '''
38 '''
39 self._opener = opener
39 self._opener = opener
40 self._validate = validate
40 self._validate = validate
41 self._root = root
41 self._root = root
42 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
42 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
43 # UNC path pointing to root share (issue4557)
43 # UNC path pointing to root share (issue4557)
44 self._rootdir = pathutil.normasprefix(root)
44 self._rootdir = pathutil.normasprefix(root)
45 self._dirty = False
45 self._dirty = False
46 self._dirtypl = False
46 self._dirtypl = False
47 self._lastnormaltime = 0
47 self._lastnormaltime = 0
48 self._ui = ui
48 self._ui = ui
49 self._filecache = {}
49 self._filecache = {}
50 self._parentwriters = 0
50 self._parentwriters = 0
51 self._filename = 'dirstate'
51 self._filename = 'dirstate'
52
52
53 def beginparentchange(self):
53 def beginparentchange(self):
54 '''Marks the beginning of a set of changes that involve changing
54 '''Marks the beginning of a set of changes that involve changing
55 the dirstate parents. If there is an exception during this time,
55 the dirstate parents. If there is an exception during this time,
56 the dirstate will not be written when the wlock is released. This
56 the dirstate will not be written when the wlock is released. This
57 prevents writing an incoherent dirstate where the parent doesn't
57 prevents writing an incoherent dirstate where the parent doesn't
58 match the contents.
58 match the contents.
59 '''
59 '''
60 self._parentwriters += 1
60 self._parentwriters += 1
61
61
62 def endparentchange(self):
62 def endparentchange(self):
63 '''Marks the end of a set of changes that involve changing the
63 '''Marks the end of a set of changes that involve changing the
64 dirstate parents. Once all parent changes have been marked done,
64 dirstate parents. Once all parent changes have been marked done,
65 the wlock will be free to write the dirstate on release.
65 the wlock will be free to write the dirstate on release.
66 '''
66 '''
67 if self._parentwriters > 0:
67 if self._parentwriters > 0:
68 self._parentwriters -= 1
68 self._parentwriters -= 1
69
69
70 def pendingparentchange(self):
70 def pendingparentchange(self):
71 '''Returns true if the dirstate is in the middle of a set of changes
71 '''Returns true if the dirstate is in the middle of a set of changes
72 that modify the dirstate parent.
72 that modify the dirstate parent.
73 '''
73 '''
74 return self._parentwriters > 0
74 return self._parentwriters > 0
75
75
76 @propertycache
76 @propertycache
77 def _map(self):
77 def _map(self):
78 '''Return the dirstate contents as a map from filename to
78 '''Return the dirstate contents as a map from filename to
79 (state, mode, size, time).'''
79 (state, mode, size, time).'''
80 self._read()
80 self._read()
81 return self._map
81 return self._map
82
82
83 @propertycache
83 @propertycache
84 def _copymap(self):
84 def _copymap(self):
85 self._read()
85 self._read()
86 return self._copymap
86 return self._copymap
87
87
88 @propertycache
88 @propertycache
89 def _filefoldmap(self):
89 def _filefoldmap(self):
90 try:
90 try:
91 makefilefoldmap = parsers.make_file_foldmap
91 makefilefoldmap = parsers.make_file_foldmap
92 except AttributeError:
92 except AttributeError:
93 pass
93 pass
94 else:
94 else:
95 return makefilefoldmap(self._map, util.normcasespec,
95 return makefilefoldmap(self._map, util.normcasespec,
96 util.normcasefallback)
96 util.normcasefallback)
97
97
98 f = {}
98 f = {}
99 normcase = util.normcase
99 normcase = util.normcase
100 for name, s in self._map.iteritems():
100 for name, s in self._map.iteritems():
101 if s[0] != 'r':
101 if s[0] != 'r':
102 f[normcase(name)] = name
102 f[normcase(name)] = name
103 f['.'] = '.' # prevents useless util.fspath() invocation
103 f['.'] = '.' # prevents useless util.fspath() invocation
104 return f
104 return f
105
105
106 @propertycache
106 @propertycache
107 def _dirfoldmap(self):
107 def _dirfoldmap(self):
108 f = {}
108 f = {}
109 normcase = util.normcase
109 normcase = util.normcase
110 for name in self._dirs:
110 for name in self._dirs:
111 f[normcase(name)] = name
111 f[normcase(name)] = name
112 return f
112 return f
113
113
114 @repocache('branch')
114 @repocache('branch')
115 def _branch(self):
115 def _branch(self):
116 try:
116 try:
117 return self._opener.read("branch").strip() or "default"
117 return self._opener.read("branch").strip() or "default"
118 except IOError, inst:
118 except IOError, inst:
119 if inst.errno != errno.ENOENT:
119 if inst.errno != errno.ENOENT:
120 raise
120 raise
121 return "default"
121 return "default"
122
122
123 @propertycache
123 @propertycache
124 def _pl(self):
124 def _pl(self):
125 try:
125 try:
126 fp = self._opener(self._filename)
126 fp = self._opener(self._filename)
127 st = fp.read(40)
127 st = fp.read(40)
128 fp.close()
128 fp.close()
129 l = len(st)
129 l = len(st)
130 if l == 40:
130 if l == 40:
131 return st[:20], st[20:40]
131 return st[:20], st[20:40]
132 elif l > 0 and l < 40:
132 elif l > 0 and l < 40:
133 raise util.Abort(_('working directory state appears damaged!'))
133 raise util.Abort(_('working directory state appears damaged!'))
134 except IOError, err:
134 except IOError, err:
135 if err.errno != errno.ENOENT:
135 if err.errno != errno.ENOENT:
136 raise
136 raise
137 return [nullid, nullid]
137 return [nullid, nullid]
138
138
139 @propertycache
139 @propertycache
140 def _dirs(self):
140 def _dirs(self):
141 return util.dirs(self._map, 'r')
141 return util.dirs(self._map, 'r')
142
142
143 def dirs(self):
143 def dirs(self):
144 return self._dirs
144 return self._dirs
145
145
146 @rootcache('.hgignore')
146 @rootcache('.hgignore')
147 def _ignore(self):
147 def _ignore(self):
148 files = []
148 files = []
149 if os.path.exists(self._join('.hgignore')):
149 if os.path.exists(self._join('.hgignore')):
150 files.append(self._join('.hgignore'))
150 files.append(self._join('.hgignore'))
151 for name, path in self._ui.configitems("ui"):
151 for name, path in self._ui.configitems("ui"):
152 if name == 'ignore' or name.startswith('ignore.'):
152 if name == 'ignore' or name.startswith('ignore.'):
153 # we need to use os.path.join here rather than self._join
153 # we need to use os.path.join here rather than self._join
154 # because path is arbitrary and user-specified
154 # because path is arbitrary and user-specified
155 files.append(os.path.join(self._rootdir, util.expandpath(path)))
155 files.append(os.path.join(self._rootdir, util.expandpath(path)))
156
156
157 if not files:
157 if not files:
158 return util.never
158 return util.never
159
159
160 pats = ['include:%s' % f for f in files]
160 pats = ['include:%s' % f for f in files]
161 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
161 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
162
162
163 @propertycache
163 @propertycache
164 def _slash(self):
164 def _slash(self):
165 return self._ui.configbool('ui', 'slash') and os.sep != '/'
165 return self._ui.configbool('ui', 'slash') and os.sep != '/'
166
166
167 @propertycache
167 @propertycache
168 def _checklink(self):
168 def _checklink(self):
169 return util.checklink(self._root)
169 return util.checklink(self._root)
170
170
171 @propertycache
171 @propertycache
172 def _checkexec(self):
172 def _checkexec(self):
173 return util.checkexec(self._root)
173 return util.checkexec(self._root)
174
174
175 @propertycache
175 @propertycache
176 def _checkcase(self):
176 def _checkcase(self):
177 return not util.checkcase(self._join('.hg'))
177 return not util.checkcase(self._join('.hg'))
178
178
179 def _join(self, f):
179 def _join(self, f):
180 # much faster than os.path.join()
180 # much faster than os.path.join()
181 # it's safe because f is always a relative path
181 # it's safe because f is always a relative path
182 return self._rootdir + f
182 return self._rootdir + f
183
183
184 def flagfunc(self, buildfallback):
184 def flagfunc(self, buildfallback):
185 if self._checklink and self._checkexec:
185 if self._checklink and self._checkexec:
186 def f(x):
186 def f(x):
187 try:
187 try:
188 st = os.lstat(self._join(x))
188 st = os.lstat(self._join(x))
189 if util.statislink(st):
189 if util.statislink(st):
190 return 'l'
190 return 'l'
191 if util.statisexec(st):
191 if util.statisexec(st):
192 return 'x'
192 return 'x'
193 except OSError:
193 except OSError:
194 pass
194 pass
195 return ''
195 return ''
196 return f
196 return f
197
197
198 fallback = buildfallback()
198 fallback = buildfallback()
199 if self._checklink:
199 if self._checklink:
200 def f(x):
200 def f(x):
201 if os.path.islink(self._join(x)):
201 if os.path.islink(self._join(x)):
202 return 'l'
202 return 'l'
203 if 'x' in fallback(x):
203 if 'x' in fallback(x):
204 return 'x'
204 return 'x'
205 return ''
205 return ''
206 return f
206 return f
207 if self._checkexec:
207 if self._checkexec:
208 def f(x):
208 def f(x):
209 if 'l' in fallback(x):
209 if 'l' in fallback(x):
210 return 'l'
210 return 'l'
211 if util.isexec(self._join(x)):
211 if util.isexec(self._join(x)):
212 return 'x'
212 return 'x'
213 return ''
213 return ''
214 return f
214 return f
215 else:
215 else:
216 return fallback
216 return fallback
217
217
218 @propertycache
218 @propertycache
219 def _cwd(self):
219 def _cwd(self):
220 return os.getcwd()
220 return os.getcwd()
221
221
222 def getcwd(self):
222 def getcwd(self):
223 cwd = self._cwd
223 cwd = self._cwd
224 if cwd == self._root:
224 if cwd == self._root:
225 return ''
225 return ''
226 # self._root ends with a path separator if self._root is '/' or 'C:\'
226 # self._root ends with a path separator if self._root is '/' or 'C:\'
227 rootsep = self._root
227 rootsep = self._root
228 if not util.endswithsep(rootsep):
228 if not util.endswithsep(rootsep):
229 rootsep += os.sep
229 rootsep += os.sep
230 if cwd.startswith(rootsep):
230 if cwd.startswith(rootsep):
231 return cwd[len(rootsep):]
231 return cwd[len(rootsep):]
232 else:
232 else:
233 # we're outside the repo. return an absolute path.
233 # we're outside the repo. return an absolute path.
234 return cwd
234 return cwd
235
235
236 def pathto(self, f, cwd=None):
236 def pathto(self, f, cwd=None):
237 if cwd is None:
237 if cwd is None:
238 cwd = self.getcwd()
238 cwd = self.getcwd()
239 path = util.pathto(self._root, cwd, f)
239 path = util.pathto(self._root, cwd, f)
240 if self._slash:
240 if self._slash:
241 return util.pconvert(path)
241 return util.pconvert(path)
242 return path
242 return path
243
243
244 def __getitem__(self, key):
244 def __getitem__(self, key):
245 '''Return the current state of key (a filename) in the dirstate.
245 '''Return the current state of key (a filename) in the dirstate.
246
246
247 States are:
247 States are:
248 n normal
248 n normal
249 m needs merging
249 m needs merging
250 r marked for removal
250 r marked for removal
251 a marked for addition
251 a marked for addition
252 ? not tracked
252 ? not tracked
253 '''
253 '''
254 return self._map.get(key, ("?",))[0]
254 return self._map.get(key, ("?",))[0]
255
255
256 def __contains__(self, key):
256 def __contains__(self, key):
257 return key in self._map
257 return key in self._map
258
258
259 def __iter__(self):
259 def __iter__(self):
260 for x in sorted(self._map):
260 for x in sorted(self._map):
261 yield x
261 yield x
262
262
263 def iteritems(self):
263 def iteritems(self):
264 return self._map.iteritems()
264 return self._map.iteritems()
265
265
266 def parents(self):
266 def parents(self):
267 return [self._validate(p) for p in self._pl]
267 return [self._validate(p) for p in self._pl]
268
268
269 def p1(self):
269 def p1(self):
270 return self._validate(self._pl[0])
270 return self._validate(self._pl[0])
271
271
272 def p2(self):
272 def p2(self):
273 return self._validate(self._pl[1])
273 return self._validate(self._pl[1])
274
274
275 def branch(self):
275 def branch(self):
276 return encoding.tolocal(self._branch)
276 return encoding.tolocal(self._branch)
277
277
278 def setparents(self, p1, p2=nullid):
278 def setparents(self, p1, p2=nullid):
279 """Set dirstate parents to p1 and p2.
279 """Set dirstate parents to p1 and p2.
280
280
281 When moving from two parents to one, 'm' merged entries a
281 When moving from two parents to one, 'm' merged entries a
282 adjusted to normal and previous copy records discarded and
282 adjusted to normal and previous copy records discarded and
283 returned by the call.
283 returned by the call.
284
284
285 See localrepo.setparents()
285 See localrepo.setparents()
286 """
286 """
287 if self._parentwriters == 0:
287 if self._parentwriters == 0:
288 raise ValueError("cannot set dirstate parent without "
288 raise ValueError("cannot set dirstate parent without "
289 "calling dirstate.beginparentchange")
289 "calling dirstate.beginparentchange")
290
290
291 self._dirty = self._dirtypl = True
291 self._dirty = self._dirtypl = True
292 oldp2 = self._pl[1]
292 oldp2 = self._pl[1]
293 self._pl = p1, p2
293 self._pl = p1, p2
294 copies = {}
294 copies = {}
295 if oldp2 != nullid and p2 == nullid:
295 if oldp2 != nullid and p2 == nullid:
296 for f, s in self._map.iteritems():
296 for f, s in self._map.iteritems():
297 # Discard 'm' markers when moving away from a merge state
297 # Discard 'm' markers when moving away from a merge state
298 if s[0] == 'm':
298 if s[0] == 'm':
299 if f in self._copymap:
299 if f in self._copymap:
300 copies[f] = self._copymap[f]
300 copies[f] = self._copymap[f]
301 self.normallookup(f)
301 self.normallookup(f)
302 # Also fix up otherparent markers
302 # Also fix up otherparent markers
303 elif s[0] == 'n' and s[2] == -2:
303 elif s[0] == 'n' and s[2] == -2:
304 if f in self._copymap:
304 if f in self._copymap:
305 copies[f] = self._copymap[f]
305 copies[f] = self._copymap[f]
306 self.add(f)
306 self.add(f)
307 return copies
307 return copies
308
308
309 def setbranch(self, branch):
309 def setbranch(self, branch):
310 self._branch = encoding.fromlocal(branch)
310 self._branch = encoding.fromlocal(branch)
311 f = self._opener('branch', 'w', atomictemp=True)
311 f = self._opener('branch', 'w', atomictemp=True)
312 try:
312 try:
313 f.write(self._branch + '\n')
313 f.write(self._branch + '\n')
314 f.close()
314 f.close()
315
315
316 # make sure filecache has the correct stat info for _branch after
316 # make sure filecache has the correct stat info for _branch after
317 # replacing the underlying file
317 # replacing the underlying file
318 ce = self._filecache['_branch']
318 ce = self._filecache['_branch']
319 if ce:
319 if ce:
320 ce.refresh()
320 ce.refresh()
321 except: # re-raises
321 except: # re-raises
322 f.discard()
322 f.discard()
323 raise
323 raise
324
324
325 def _read(self):
325 def _read(self):
326 self._map = {}
326 self._map = {}
327 self._copymap = {}
327 self._copymap = {}
328 try:
328 try:
329 fp = self._opener.open(self._filename)
329 fp = self._opener.open(self._filename)
330 try:
330 try:
331 st = fp.read()
331 st = fp.read()
332 finally:
332 finally:
333 fp.close()
333 fp.close()
334 except IOError, err:
334 except IOError, err:
335 if err.errno != errno.ENOENT:
335 if err.errno != errno.ENOENT:
336 raise
336 raise
337 return
337 return
338 if not st:
338 if not st:
339 return
339 return
340
340
341 if util.safehasattr(parsers, 'dict_new_presized'):
342 # Make an estimate of the number of files in the dirstate based on
343 # its size. From a linear regression on a set of real-world repos,
344 # all over 10,000 files, the size of a dirstate entry is 85
345 # bytes. The cost of resizing is significantly higher than the cost
346 # of filling in a larger presized dict, so subtract 20% from the
347 # size.
348 #
349 # This heuristic is imperfect in many ways, so in a future dirstate
350 # format update it makes sense to just record the number of entries
351 # on write.
352 self._map = parsers.dict_new_presized(len(st) / 71)
353
341 # Python's garbage collector triggers a GC each time a certain number
354 # Python's garbage collector triggers a GC each time a certain number
342 # of container objects (the number being defined by
355 # of container objects (the number being defined by
343 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
356 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
344 # for each file in the dirstate. The C version then immediately marks
357 # for each file in the dirstate. The C version then immediately marks
345 # them as not to be tracked by the collector. However, this has no
358 # them as not to be tracked by the collector. However, this has no
346 # effect on when GCs are triggered, only on what objects the GC looks
359 # effect on when GCs are triggered, only on what objects the GC looks
347 # into. This means that O(number of files) GCs are unavoidable.
360 # into. This means that O(number of files) GCs are unavoidable.
348 # Depending on when in the process's lifetime the dirstate is parsed,
361 # Depending on when in the process's lifetime the dirstate is parsed,
349 # this can get very expensive. As a workaround, disable GC while
362 # this can get very expensive. As a workaround, disable GC while
350 # parsing the dirstate.
363 # parsing the dirstate.
351 #
364 #
352 # (we cannot decorate the function directly since it is in a C module)
365 # (we cannot decorate the function directly since it is in a C module)
353 parse_dirstate = util.nogc(parsers.parse_dirstate)
366 parse_dirstate = util.nogc(parsers.parse_dirstate)
354 p = parse_dirstate(self._map, self._copymap, st)
367 p = parse_dirstate(self._map, self._copymap, st)
355 if not self._dirtypl:
368 if not self._dirtypl:
356 self._pl = p
369 self._pl = p
357
370
358 def invalidate(self):
371 def invalidate(self):
359 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
372 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
360 "_pl", "_dirs", "_ignore"):
373 "_pl", "_dirs", "_ignore"):
361 if a in self.__dict__:
374 if a in self.__dict__:
362 delattr(self, a)
375 delattr(self, a)
363 self._lastnormaltime = 0
376 self._lastnormaltime = 0
364 self._dirty = False
377 self._dirty = False
365 self._parentwriters = 0
378 self._parentwriters = 0
366
379
367 def copy(self, source, dest):
380 def copy(self, source, dest):
368 """Mark dest as a copy of source. Unmark dest if source is None."""
381 """Mark dest as a copy of source. Unmark dest if source is None."""
369 if source == dest:
382 if source == dest:
370 return
383 return
371 self._dirty = True
384 self._dirty = True
372 if source is not None:
385 if source is not None:
373 self._copymap[dest] = source
386 self._copymap[dest] = source
374 elif dest in self._copymap:
387 elif dest in self._copymap:
375 del self._copymap[dest]
388 del self._copymap[dest]
376
389
377 def copied(self, file):
390 def copied(self, file):
378 return self._copymap.get(file, None)
391 return self._copymap.get(file, None)
379
392
380 def copies(self):
393 def copies(self):
381 return self._copymap
394 return self._copymap
382
395
383 def _droppath(self, f):
396 def _droppath(self, f):
384 if self[f] not in "?r" and "_dirs" in self.__dict__:
397 if self[f] not in "?r" and "_dirs" in self.__dict__:
385 self._dirs.delpath(f)
398 self._dirs.delpath(f)
386
399
387 def _addpath(self, f, state, mode, size, mtime):
400 def _addpath(self, f, state, mode, size, mtime):
388 oldstate = self[f]
401 oldstate = self[f]
389 if state == 'a' or oldstate == 'r':
402 if state == 'a' or oldstate == 'r':
390 scmutil.checkfilename(f)
403 scmutil.checkfilename(f)
391 if f in self._dirs:
404 if f in self._dirs:
392 raise util.Abort(_('directory %r already in dirstate') % f)
405 raise util.Abort(_('directory %r already in dirstate') % f)
393 # shadows
406 # shadows
394 for d in util.finddirs(f):
407 for d in util.finddirs(f):
395 if d in self._dirs:
408 if d in self._dirs:
396 break
409 break
397 if d in self._map and self[d] != 'r':
410 if d in self._map and self[d] != 'r':
398 raise util.Abort(
411 raise util.Abort(
399 _('file %r in dirstate clashes with %r') % (d, f))
412 _('file %r in dirstate clashes with %r') % (d, f))
400 if oldstate in "?r" and "_dirs" in self.__dict__:
413 if oldstate in "?r" and "_dirs" in self.__dict__:
401 self._dirs.addpath(f)
414 self._dirs.addpath(f)
402 self._dirty = True
415 self._dirty = True
403 self._map[f] = dirstatetuple(state, mode, size, mtime)
416 self._map[f] = dirstatetuple(state, mode, size, mtime)
404
417
405 def normal(self, f):
418 def normal(self, f):
406 '''Mark a file normal and clean.'''
419 '''Mark a file normal and clean.'''
407 s = os.lstat(self._join(f))
420 s = os.lstat(self._join(f))
408 mtime = int(s.st_mtime)
421 mtime = int(s.st_mtime)
409 self._addpath(f, 'n', s.st_mode,
422 self._addpath(f, 'n', s.st_mode,
410 s.st_size & _rangemask, mtime & _rangemask)
423 s.st_size & _rangemask, mtime & _rangemask)
411 if f in self._copymap:
424 if f in self._copymap:
412 del self._copymap[f]
425 del self._copymap[f]
413 if mtime > self._lastnormaltime:
426 if mtime > self._lastnormaltime:
414 # Remember the most recent modification timeslot for status(),
427 # Remember the most recent modification timeslot for status(),
415 # to make sure we won't miss future size-preserving file content
428 # to make sure we won't miss future size-preserving file content
416 # modifications that happen within the same timeslot.
429 # modifications that happen within the same timeslot.
417 self._lastnormaltime = mtime
430 self._lastnormaltime = mtime
418
431
419 def normallookup(self, f):
432 def normallookup(self, f):
420 '''Mark a file normal, but possibly dirty.'''
433 '''Mark a file normal, but possibly dirty.'''
421 if self._pl[1] != nullid and f in self._map:
434 if self._pl[1] != nullid and f in self._map:
422 # if there is a merge going on and the file was either
435 # if there is a merge going on and the file was either
423 # in state 'm' (-1) or coming from other parent (-2) before
436 # in state 'm' (-1) or coming from other parent (-2) before
424 # being removed, restore that state.
437 # being removed, restore that state.
425 entry = self._map[f]
438 entry = self._map[f]
426 if entry[0] == 'r' and entry[2] in (-1, -2):
439 if entry[0] == 'r' and entry[2] in (-1, -2):
427 source = self._copymap.get(f)
440 source = self._copymap.get(f)
428 if entry[2] == -1:
441 if entry[2] == -1:
429 self.merge(f)
442 self.merge(f)
430 elif entry[2] == -2:
443 elif entry[2] == -2:
431 self.otherparent(f)
444 self.otherparent(f)
432 if source:
445 if source:
433 self.copy(source, f)
446 self.copy(source, f)
434 return
447 return
435 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
448 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
436 return
449 return
437 self._addpath(f, 'n', 0, -1, -1)
450 self._addpath(f, 'n', 0, -1, -1)
438 if f in self._copymap:
451 if f in self._copymap:
439 del self._copymap[f]
452 del self._copymap[f]
440
453
441 def otherparent(self, f):
454 def otherparent(self, f):
442 '''Mark as coming from the other parent, always dirty.'''
455 '''Mark as coming from the other parent, always dirty.'''
443 if self._pl[1] == nullid:
456 if self._pl[1] == nullid:
444 raise util.Abort(_("setting %r to other parent "
457 raise util.Abort(_("setting %r to other parent "
445 "only allowed in merges") % f)
458 "only allowed in merges") % f)
446 if f in self and self[f] == 'n':
459 if f in self and self[f] == 'n':
447 # merge-like
460 # merge-like
448 self._addpath(f, 'm', 0, -2, -1)
461 self._addpath(f, 'm', 0, -2, -1)
449 else:
462 else:
450 # add-like
463 # add-like
451 self._addpath(f, 'n', 0, -2, -1)
464 self._addpath(f, 'n', 0, -2, -1)
452
465
453 if f in self._copymap:
466 if f in self._copymap:
454 del self._copymap[f]
467 del self._copymap[f]
455
468
456 def add(self, f):
469 def add(self, f):
457 '''Mark a file added.'''
470 '''Mark a file added.'''
458 self._addpath(f, 'a', 0, -1, -1)
471 self._addpath(f, 'a', 0, -1, -1)
459 if f in self._copymap:
472 if f in self._copymap:
460 del self._copymap[f]
473 del self._copymap[f]
461
474
462 def remove(self, f):
475 def remove(self, f):
463 '''Mark a file removed.'''
476 '''Mark a file removed.'''
464 self._dirty = True
477 self._dirty = True
465 self._droppath(f)
478 self._droppath(f)
466 size = 0
479 size = 0
467 if self._pl[1] != nullid and f in self._map:
480 if self._pl[1] != nullid and f in self._map:
468 # backup the previous state
481 # backup the previous state
469 entry = self._map[f]
482 entry = self._map[f]
470 if entry[0] == 'm': # merge
483 if entry[0] == 'm': # merge
471 size = -1
484 size = -1
472 elif entry[0] == 'n' and entry[2] == -2: # other parent
485 elif entry[0] == 'n' and entry[2] == -2: # other parent
473 size = -2
486 size = -2
474 self._map[f] = dirstatetuple('r', 0, size, 0)
487 self._map[f] = dirstatetuple('r', 0, size, 0)
475 if size == 0 and f in self._copymap:
488 if size == 0 and f in self._copymap:
476 del self._copymap[f]
489 del self._copymap[f]
477
490
478 def merge(self, f):
491 def merge(self, f):
479 '''Mark a file merged.'''
492 '''Mark a file merged.'''
480 if self._pl[1] == nullid:
493 if self._pl[1] == nullid:
481 return self.normallookup(f)
494 return self.normallookup(f)
482 return self.otherparent(f)
495 return self.otherparent(f)
483
496
484 def drop(self, f):
497 def drop(self, f):
485 '''Drop a file from the dirstate'''
498 '''Drop a file from the dirstate'''
486 if f in self._map:
499 if f in self._map:
487 self._dirty = True
500 self._dirty = True
488 self._droppath(f)
501 self._droppath(f)
489 del self._map[f]
502 del self._map[f]
490
503
491 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
504 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
492 if exists is None:
505 if exists is None:
493 exists = os.path.lexists(os.path.join(self._root, path))
506 exists = os.path.lexists(os.path.join(self._root, path))
494 if not exists:
507 if not exists:
495 # Maybe a path component exists
508 # Maybe a path component exists
496 if not ignoremissing and '/' in path:
509 if not ignoremissing and '/' in path:
497 d, f = path.rsplit('/', 1)
510 d, f = path.rsplit('/', 1)
498 d = self._normalize(d, False, ignoremissing, None)
511 d = self._normalize(d, False, ignoremissing, None)
499 folded = d + "/" + f
512 folded = d + "/" + f
500 else:
513 else:
501 # No path components, preserve original case
514 # No path components, preserve original case
502 folded = path
515 folded = path
503 else:
516 else:
504 # recursively normalize leading directory components
517 # recursively normalize leading directory components
505 # against dirstate
518 # against dirstate
506 if '/' in normed:
519 if '/' in normed:
507 d, f = normed.rsplit('/', 1)
520 d, f = normed.rsplit('/', 1)
508 d = self._normalize(d, False, ignoremissing, True)
521 d = self._normalize(d, False, ignoremissing, True)
509 r = self._root + "/" + d
522 r = self._root + "/" + d
510 folded = d + "/" + util.fspath(f, r)
523 folded = d + "/" + util.fspath(f, r)
511 else:
524 else:
512 folded = util.fspath(normed, self._root)
525 folded = util.fspath(normed, self._root)
513 storemap[normed] = folded
526 storemap[normed] = folded
514
527
515 return folded
528 return folded
516
529
517 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
530 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
518 normed = util.normcase(path)
531 normed = util.normcase(path)
519 folded = self._filefoldmap.get(normed, None)
532 folded = self._filefoldmap.get(normed, None)
520 if folded is None:
533 if folded is None:
521 if isknown:
534 if isknown:
522 folded = path
535 folded = path
523 else:
536 else:
524 folded = self._discoverpath(path, normed, ignoremissing, exists,
537 folded = self._discoverpath(path, normed, ignoremissing, exists,
525 self._filefoldmap)
538 self._filefoldmap)
526 return folded
539 return folded
527
540
528 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
541 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
529 normed = util.normcase(path)
542 normed = util.normcase(path)
530 folded = self._filefoldmap.get(normed, None)
543 folded = self._filefoldmap.get(normed, None)
531 if folded is None:
544 if folded is None:
532 folded = self._dirfoldmap.get(normed, None)
545 folded = self._dirfoldmap.get(normed, None)
533 if folded is None:
546 if folded is None:
534 if isknown:
547 if isknown:
535 folded = path
548 folded = path
536 else:
549 else:
537 # store discovered result in dirfoldmap so that future
550 # store discovered result in dirfoldmap so that future
538 # normalizefile calls don't start matching directories
551 # normalizefile calls don't start matching directories
539 folded = self._discoverpath(path, normed, ignoremissing, exists,
552 folded = self._discoverpath(path, normed, ignoremissing, exists,
540 self._dirfoldmap)
553 self._dirfoldmap)
541 return folded
554 return folded
542
555
543 def normalize(self, path, isknown=False, ignoremissing=False):
556 def normalize(self, path, isknown=False, ignoremissing=False):
544 '''
557 '''
545 normalize the case of a pathname when on a casefolding filesystem
558 normalize the case of a pathname when on a casefolding filesystem
546
559
547 isknown specifies whether the filename came from walking the
560 isknown specifies whether the filename came from walking the
548 disk, to avoid extra filesystem access.
561 disk, to avoid extra filesystem access.
549
562
550 If ignoremissing is True, missing path are returned
563 If ignoremissing is True, missing path are returned
551 unchanged. Otherwise, we try harder to normalize possibly
564 unchanged. Otherwise, we try harder to normalize possibly
552 existing path components.
565 existing path components.
553
566
554 The normalized case is determined based on the following precedence:
567 The normalized case is determined based on the following precedence:
555
568
556 - version of name already stored in the dirstate
569 - version of name already stored in the dirstate
557 - version of name stored on disk
570 - version of name stored on disk
558 - version provided via command arguments
571 - version provided via command arguments
559 '''
572 '''
560
573
561 if self._checkcase:
574 if self._checkcase:
562 return self._normalize(path, isknown, ignoremissing)
575 return self._normalize(path, isknown, ignoremissing)
563 return path
576 return path
564
577
565 def clear(self):
578 def clear(self):
566 self._map = {}
579 self._map = {}
567 if "_dirs" in self.__dict__:
580 if "_dirs" in self.__dict__:
568 delattr(self, "_dirs")
581 delattr(self, "_dirs")
569 self._copymap = {}
582 self._copymap = {}
570 self._pl = [nullid, nullid]
583 self._pl = [nullid, nullid]
571 self._lastnormaltime = 0
584 self._lastnormaltime = 0
572 self._dirty = True
585 self._dirty = True
573
586
574 def rebuild(self, parent, allfiles, changedfiles=None):
587 def rebuild(self, parent, allfiles, changedfiles=None):
575 if changedfiles is None:
588 if changedfiles is None:
576 changedfiles = allfiles
589 changedfiles = allfiles
577 oldmap = self._map
590 oldmap = self._map
578 self.clear()
591 self.clear()
579 for f in allfiles:
592 for f in allfiles:
580 if f not in changedfiles:
593 if f not in changedfiles:
581 self._map[f] = oldmap[f]
594 self._map[f] = oldmap[f]
582 else:
595 else:
583 if 'x' in allfiles.flags(f):
596 if 'x' in allfiles.flags(f):
584 self._map[f] = dirstatetuple('n', 0777, -1, 0)
597 self._map[f] = dirstatetuple('n', 0777, -1, 0)
585 else:
598 else:
586 self._map[f] = dirstatetuple('n', 0666, -1, 0)
599 self._map[f] = dirstatetuple('n', 0666, -1, 0)
587 self._pl = (parent, nullid)
600 self._pl = (parent, nullid)
588 self._dirty = True
601 self._dirty = True
589
602
590 def write(self):
603 def write(self):
591 if not self._dirty:
604 if not self._dirty:
592 return
605 return
593
606
594 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
607 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
595 # timestamp of each entries in dirstate, because of 'now > mtime'
608 # timestamp of each entries in dirstate, because of 'now > mtime'
596 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
609 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
597 if delaywrite > 0:
610 if delaywrite > 0:
598 import time # to avoid useless import
611 import time # to avoid useless import
599 time.sleep(delaywrite)
612 time.sleep(delaywrite)
600
613
601 st = self._opener(self._filename, "w", atomictemp=True)
614 st = self._opener(self._filename, "w", atomictemp=True)
602 # use the modification time of the newly created temporary file as the
615 # use the modification time of the newly created temporary file as the
603 # filesystem's notion of 'now'
616 # filesystem's notion of 'now'
604 now = util.fstat(st).st_mtime
617 now = util.fstat(st).st_mtime
605 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
618 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
606 st.close()
619 st.close()
607 self._lastnormaltime = 0
620 self._lastnormaltime = 0
608 self._dirty = self._dirtypl = False
621 self._dirty = self._dirtypl = False
609
622
610 def _dirignore(self, f):
623 def _dirignore(self, f):
611 if f == '.':
624 if f == '.':
612 return False
625 return False
613 if self._ignore(f):
626 if self._ignore(f):
614 return True
627 return True
615 for p in util.finddirs(f):
628 for p in util.finddirs(f):
616 if self._ignore(p):
629 if self._ignore(p):
617 return True
630 return True
618 return False
631 return False
619
632
620 def _walkexplicit(self, match, subrepos):
633 def _walkexplicit(self, match, subrepos):
621 '''Get stat data about the files explicitly specified by match.
634 '''Get stat data about the files explicitly specified by match.
622
635
623 Return a triple (results, dirsfound, dirsnotfound).
636 Return a triple (results, dirsfound, dirsnotfound).
624 - results is a mapping from filename to stat result. It also contains
637 - results is a mapping from filename to stat result. It also contains
625 listings mapping subrepos and .hg to None.
638 listings mapping subrepos and .hg to None.
626 - dirsfound is a list of files found to be directories.
639 - dirsfound is a list of files found to be directories.
627 - dirsnotfound is a list of files that the dirstate thinks are
640 - dirsnotfound is a list of files that the dirstate thinks are
628 directories and that were not found.'''
641 directories and that were not found.'''
629
642
630 def badtype(mode):
643 def badtype(mode):
631 kind = _('unknown')
644 kind = _('unknown')
632 if stat.S_ISCHR(mode):
645 if stat.S_ISCHR(mode):
633 kind = _('character device')
646 kind = _('character device')
634 elif stat.S_ISBLK(mode):
647 elif stat.S_ISBLK(mode):
635 kind = _('block device')
648 kind = _('block device')
636 elif stat.S_ISFIFO(mode):
649 elif stat.S_ISFIFO(mode):
637 kind = _('fifo')
650 kind = _('fifo')
638 elif stat.S_ISSOCK(mode):
651 elif stat.S_ISSOCK(mode):
639 kind = _('socket')
652 kind = _('socket')
640 elif stat.S_ISDIR(mode):
653 elif stat.S_ISDIR(mode):
641 kind = _('directory')
654 kind = _('directory')
642 return _('unsupported file type (type is %s)') % kind
655 return _('unsupported file type (type is %s)') % kind
643
656
644 matchedir = match.explicitdir
657 matchedir = match.explicitdir
645 badfn = match.bad
658 badfn = match.bad
646 dmap = self._map
659 dmap = self._map
647 lstat = os.lstat
660 lstat = os.lstat
648 getkind = stat.S_IFMT
661 getkind = stat.S_IFMT
649 dirkind = stat.S_IFDIR
662 dirkind = stat.S_IFDIR
650 regkind = stat.S_IFREG
663 regkind = stat.S_IFREG
651 lnkkind = stat.S_IFLNK
664 lnkkind = stat.S_IFLNK
652 join = self._join
665 join = self._join
653 dirsfound = []
666 dirsfound = []
654 foundadd = dirsfound.append
667 foundadd = dirsfound.append
655 dirsnotfound = []
668 dirsnotfound = []
656 notfoundadd = dirsnotfound.append
669 notfoundadd = dirsnotfound.append
657
670
658 if not match.isexact() and self._checkcase:
671 if not match.isexact() and self._checkcase:
659 normalize = self._normalize
672 normalize = self._normalize
660 else:
673 else:
661 normalize = None
674 normalize = None
662
675
663 files = sorted(match.files())
676 files = sorted(match.files())
664 subrepos.sort()
677 subrepos.sort()
665 i, j = 0, 0
678 i, j = 0, 0
666 while i < len(files) and j < len(subrepos):
679 while i < len(files) and j < len(subrepos):
667 subpath = subrepos[j] + "/"
680 subpath = subrepos[j] + "/"
668 if files[i] < subpath:
681 if files[i] < subpath:
669 i += 1
682 i += 1
670 continue
683 continue
671 while i < len(files) and files[i].startswith(subpath):
684 while i < len(files) and files[i].startswith(subpath):
672 del files[i]
685 del files[i]
673 j += 1
686 j += 1
674
687
675 if not files or '.' in files:
688 if not files or '.' in files:
676 files = ['.']
689 files = ['.']
677 results = dict.fromkeys(subrepos)
690 results = dict.fromkeys(subrepos)
678 results['.hg'] = None
691 results['.hg'] = None
679
692
680 alldirs = None
693 alldirs = None
681 for ff in files:
694 for ff in files:
682 # constructing the foldmap is expensive, so don't do it for the
695 # constructing the foldmap is expensive, so don't do it for the
683 # common case where files is ['.']
696 # common case where files is ['.']
684 if normalize and ff != '.':
697 if normalize and ff != '.':
685 nf = normalize(ff, False, True)
698 nf = normalize(ff, False, True)
686 else:
699 else:
687 nf = ff
700 nf = ff
688 if nf in results:
701 if nf in results:
689 continue
702 continue
690
703
691 try:
704 try:
692 st = lstat(join(nf))
705 st = lstat(join(nf))
693 kind = getkind(st.st_mode)
706 kind = getkind(st.st_mode)
694 if kind == dirkind:
707 if kind == dirkind:
695 if nf in dmap:
708 if nf in dmap:
696 # file replaced by dir on disk but still in dirstate
709 # file replaced by dir on disk but still in dirstate
697 results[nf] = None
710 results[nf] = None
698 if matchedir:
711 if matchedir:
699 matchedir(nf)
712 matchedir(nf)
700 foundadd((nf, ff))
713 foundadd((nf, ff))
701 elif kind == regkind or kind == lnkkind:
714 elif kind == regkind or kind == lnkkind:
702 results[nf] = st
715 results[nf] = st
703 else:
716 else:
704 badfn(ff, badtype(kind))
717 badfn(ff, badtype(kind))
705 if nf in dmap:
718 if nf in dmap:
706 results[nf] = None
719 results[nf] = None
707 except OSError, inst: # nf not found on disk - it is dirstate only
720 except OSError, inst: # nf not found on disk - it is dirstate only
708 if nf in dmap: # does it exactly match a missing file?
721 if nf in dmap: # does it exactly match a missing file?
709 results[nf] = None
722 results[nf] = None
710 else: # does it match a missing directory?
723 else: # does it match a missing directory?
711 if alldirs is None:
724 if alldirs is None:
712 alldirs = util.dirs(dmap)
725 alldirs = util.dirs(dmap)
713 if nf in alldirs:
726 if nf in alldirs:
714 if matchedir:
727 if matchedir:
715 matchedir(nf)
728 matchedir(nf)
716 notfoundadd(nf)
729 notfoundadd(nf)
717 else:
730 else:
718 badfn(ff, inst.strerror)
731 badfn(ff, inst.strerror)
719
732
720 return results, dirsfound, dirsnotfound
733 return results, dirsfound, dirsnotfound
721
734
722 def walk(self, match, subrepos, unknown, ignored, full=True):
735 def walk(self, match, subrepos, unknown, ignored, full=True):
723 '''
736 '''
724 Walk recursively through the directory tree, finding all files
737 Walk recursively through the directory tree, finding all files
725 matched by match.
738 matched by match.
726
739
727 If full is False, maybe skip some known-clean files.
740 If full is False, maybe skip some known-clean files.
728
741
729 Return a dict mapping filename to stat-like object (either
742 Return a dict mapping filename to stat-like object (either
730 mercurial.osutil.stat instance or return value of os.stat()).
743 mercurial.osutil.stat instance or return value of os.stat()).
731
744
732 '''
745 '''
733 # full is a flag that extensions that hook into walk can use -- this
746 # full is a flag that extensions that hook into walk can use -- this
734 # implementation doesn't use it at all. This satisfies the contract
747 # implementation doesn't use it at all. This satisfies the contract
735 # because we only guarantee a "maybe".
748 # because we only guarantee a "maybe".
736
749
737 if ignored:
750 if ignored:
738 ignore = util.never
751 ignore = util.never
739 dirignore = util.never
752 dirignore = util.never
740 elif unknown:
753 elif unknown:
741 ignore = self._ignore
754 ignore = self._ignore
742 dirignore = self._dirignore
755 dirignore = self._dirignore
743 else:
756 else:
744 # if not unknown and not ignored, drop dir recursion and step 2
757 # if not unknown and not ignored, drop dir recursion and step 2
745 ignore = util.always
758 ignore = util.always
746 dirignore = util.always
759 dirignore = util.always
747
760
748 matchfn = match.matchfn
761 matchfn = match.matchfn
749 matchalways = match.always()
762 matchalways = match.always()
750 matchtdir = match.traversedir
763 matchtdir = match.traversedir
751 dmap = self._map
764 dmap = self._map
752 listdir = osutil.listdir
765 listdir = osutil.listdir
753 lstat = os.lstat
766 lstat = os.lstat
754 dirkind = stat.S_IFDIR
767 dirkind = stat.S_IFDIR
755 regkind = stat.S_IFREG
768 regkind = stat.S_IFREG
756 lnkkind = stat.S_IFLNK
769 lnkkind = stat.S_IFLNK
757 join = self._join
770 join = self._join
758
771
759 exact = skipstep3 = False
772 exact = skipstep3 = False
760 if match.isexact(): # match.exact
773 if match.isexact(): # match.exact
761 exact = True
774 exact = True
762 dirignore = util.always # skip step 2
775 dirignore = util.always # skip step 2
763 elif match.prefix(): # match.match, no patterns
776 elif match.prefix(): # match.match, no patterns
764 skipstep3 = True
777 skipstep3 = True
765
778
766 if not exact and self._checkcase:
779 if not exact and self._checkcase:
767 normalize = self._normalize
780 normalize = self._normalize
768 normalizefile = self._normalizefile
781 normalizefile = self._normalizefile
769 skipstep3 = False
782 skipstep3 = False
770 else:
783 else:
771 normalize = self._normalize
784 normalize = self._normalize
772 normalizefile = None
785 normalizefile = None
773
786
774 # step 1: find all explicit files
787 # step 1: find all explicit files
775 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
788 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
776
789
777 skipstep3 = skipstep3 and not (work or dirsnotfound)
790 skipstep3 = skipstep3 and not (work or dirsnotfound)
778 work = [d for d in work if not dirignore(d[0])]
791 work = [d for d in work if not dirignore(d[0])]
779
792
780 # step 2: visit subdirectories
793 # step 2: visit subdirectories
781 def traverse(work, alreadynormed):
794 def traverse(work, alreadynormed):
782 wadd = work.append
795 wadd = work.append
783 while work:
796 while work:
784 nd = work.pop()
797 nd = work.pop()
785 skip = None
798 skip = None
786 if nd == '.':
799 if nd == '.':
787 nd = ''
800 nd = ''
788 else:
801 else:
789 skip = '.hg'
802 skip = '.hg'
790 try:
803 try:
791 entries = listdir(join(nd), stat=True, skip=skip)
804 entries = listdir(join(nd), stat=True, skip=skip)
792 except OSError, inst:
805 except OSError, inst:
793 if inst.errno in (errno.EACCES, errno.ENOENT):
806 if inst.errno in (errno.EACCES, errno.ENOENT):
794 match.bad(self.pathto(nd), inst.strerror)
807 match.bad(self.pathto(nd), inst.strerror)
795 continue
808 continue
796 raise
809 raise
797 for f, kind, st in entries:
810 for f, kind, st in entries:
798 if normalizefile:
811 if normalizefile:
799 # even though f might be a directory, we're only
812 # even though f might be a directory, we're only
800 # interested in comparing it to files currently in the
813 # interested in comparing it to files currently in the
801 # dmap -- therefore normalizefile is enough
814 # dmap -- therefore normalizefile is enough
802 nf = normalizefile(nd and (nd + "/" + f) or f, True,
815 nf = normalizefile(nd and (nd + "/" + f) or f, True,
803 True)
816 True)
804 else:
817 else:
805 nf = nd and (nd + "/" + f) or f
818 nf = nd and (nd + "/" + f) or f
806 if nf not in results:
819 if nf not in results:
807 if kind == dirkind:
820 if kind == dirkind:
808 if not ignore(nf):
821 if not ignore(nf):
809 if matchtdir:
822 if matchtdir:
810 matchtdir(nf)
823 matchtdir(nf)
811 wadd(nf)
824 wadd(nf)
812 if nf in dmap and (matchalways or matchfn(nf)):
825 if nf in dmap and (matchalways or matchfn(nf)):
813 results[nf] = None
826 results[nf] = None
814 elif kind == regkind or kind == lnkkind:
827 elif kind == regkind or kind == lnkkind:
815 if nf in dmap:
828 if nf in dmap:
816 if matchalways or matchfn(nf):
829 if matchalways or matchfn(nf):
817 results[nf] = st
830 results[nf] = st
818 elif ((matchalways or matchfn(nf))
831 elif ((matchalways or matchfn(nf))
819 and not ignore(nf)):
832 and not ignore(nf)):
820 # unknown file -- normalize if necessary
833 # unknown file -- normalize if necessary
821 if not alreadynormed:
834 if not alreadynormed:
822 nf = normalize(nf, False, True)
835 nf = normalize(nf, False, True)
823 results[nf] = st
836 results[nf] = st
824 elif nf in dmap and (matchalways or matchfn(nf)):
837 elif nf in dmap and (matchalways or matchfn(nf)):
825 results[nf] = None
838 results[nf] = None
826
839
827 for nd, d in work:
840 for nd, d in work:
828 # alreadynormed means that processwork doesn't have to do any
841 # alreadynormed means that processwork doesn't have to do any
829 # expensive directory normalization
842 # expensive directory normalization
830 alreadynormed = not normalize or nd == d
843 alreadynormed = not normalize or nd == d
831 traverse([d], alreadynormed)
844 traverse([d], alreadynormed)
832
845
833 for s in subrepos:
846 for s in subrepos:
834 del results[s]
847 del results[s]
835 del results['.hg']
848 del results['.hg']
836
849
837 # step 3: visit remaining files from dmap
850 # step 3: visit remaining files from dmap
838 if not skipstep3 and not exact:
851 if not skipstep3 and not exact:
839 # If a dmap file is not in results yet, it was either
852 # If a dmap file is not in results yet, it was either
840 # a) not matching matchfn b) ignored, c) missing, or d) under a
853 # a) not matching matchfn b) ignored, c) missing, or d) under a
841 # symlink directory.
854 # symlink directory.
842 if not results and matchalways:
855 if not results and matchalways:
843 visit = dmap.keys()
856 visit = dmap.keys()
844 else:
857 else:
845 visit = [f for f in dmap if f not in results and matchfn(f)]
858 visit = [f for f in dmap if f not in results and matchfn(f)]
846 visit.sort()
859 visit.sort()
847
860
848 if unknown:
861 if unknown:
849 # unknown == True means we walked all dirs under the roots
862 # unknown == True means we walked all dirs under the roots
850 # that wasn't ignored, and everything that matched was stat'ed
863 # that wasn't ignored, and everything that matched was stat'ed
851 # and is already in results.
864 # and is already in results.
852 # The rest must thus be ignored or under a symlink.
865 # The rest must thus be ignored or under a symlink.
853 audit_path = pathutil.pathauditor(self._root)
866 audit_path = pathutil.pathauditor(self._root)
854
867
855 for nf in iter(visit):
868 for nf in iter(visit):
856 # If a stat for the same file was already added with a
869 # If a stat for the same file was already added with a
857 # different case, don't add one for this, since that would
870 # different case, don't add one for this, since that would
858 # make it appear as if the file exists under both names
871 # make it appear as if the file exists under both names
859 # on disk.
872 # on disk.
860 if (normalizefile and
873 if (normalizefile and
861 normalizefile(nf, True, True) in results):
874 normalizefile(nf, True, True) in results):
862 results[nf] = None
875 results[nf] = None
863 # Report ignored items in the dmap as long as they are not
876 # Report ignored items in the dmap as long as they are not
864 # under a symlink directory.
877 # under a symlink directory.
865 elif audit_path.check(nf):
878 elif audit_path.check(nf):
866 try:
879 try:
867 results[nf] = lstat(join(nf))
880 results[nf] = lstat(join(nf))
868 # file was just ignored, no links, and exists
881 # file was just ignored, no links, and exists
869 except OSError:
882 except OSError:
870 # file doesn't exist
883 # file doesn't exist
871 results[nf] = None
884 results[nf] = None
872 else:
885 else:
873 # It's either missing or under a symlink directory
886 # It's either missing or under a symlink directory
874 # which we in this case report as missing
887 # which we in this case report as missing
875 results[nf] = None
888 results[nf] = None
876 else:
889 else:
877 # We may not have walked the full directory tree above,
890 # We may not have walked the full directory tree above,
878 # so stat and check everything we missed.
891 # so stat and check everything we missed.
879 nf = iter(visit).next
892 nf = iter(visit).next
880 for st in util.statfiles([join(i) for i in visit]):
893 for st in util.statfiles([join(i) for i in visit]):
881 results[nf()] = st
894 results[nf()] = st
882 return results
895 return results
883
896
884 def status(self, match, subrepos, ignored, clean, unknown):
897 def status(self, match, subrepos, ignored, clean, unknown):
885 '''Determine the status of the working copy relative to the
898 '''Determine the status of the working copy relative to the
886 dirstate and return a pair of (unsure, status), where status is of type
899 dirstate and return a pair of (unsure, status), where status is of type
887 scmutil.status and:
900 scmutil.status and:
888
901
889 unsure:
902 unsure:
890 files that might have been modified since the dirstate was
903 files that might have been modified since the dirstate was
891 written, but need to be read to be sure (size is the same
904 written, but need to be read to be sure (size is the same
892 but mtime differs)
905 but mtime differs)
893 status.modified:
906 status.modified:
894 files that have definitely been modified since the dirstate
907 files that have definitely been modified since the dirstate
895 was written (different size or mode)
908 was written (different size or mode)
896 status.clean:
909 status.clean:
897 files that have definitely not been modified since the
910 files that have definitely not been modified since the
898 dirstate was written
911 dirstate was written
899 '''
912 '''
900 listignored, listclean, listunknown = ignored, clean, unknown
913 listignored, listclean, listunknown = ignored, clean, unknown
901 lookup, modified, added, unknown, ignored = [], [], [], [], []
914 lookup, modified, added, unknown, ignored = [], [], [], [], []
902 removed, deleted, clean = [], [], []
915 removed, deleted, clean = [], [], []
903
916
904 dmap = self._map
917 dmap = self._map
905 ladd = lookup.append # aka "unsure"
918 ladd = lookup.append # aka "unsure"
906 madd = modified.append
919 madd = modified.append
907 aadd = added.append
920 aadd = added.append
908 uadd = unknown.append
921 uadd = unknown.append
909 iadd = ignored.append
922 iadd = ignored.append
910 radd = removed.append
923 radd = removed.append
911 dadd = deleted.append
924 dadd = deleted.append
912 cadd = clean.append
925 cadd = clean.append
913 mexact = match.exact
926 mexact = match.exact
914 dirignore = self._dirignore
927 dirignore = self._dirignore
915 checkexec = self._checkexec
928 checkexec = self._checkexec
916 copymap = self._copymap
929 copymap = self._copymap
917 lastnormaltime = self._lastnormaltime
930 lastnormaltime = self._lastnormaltime
918
931
919 # We need to do full walks when either
932 # We need to do full walks when either
920 # - we're listing all clean files, or
933 # - we're listing all clean files, or
921 # - match.traversedir does something, because match.traversedir should
934 # - match.traversedir does something, because match.traversedir should
922 # be called for every dir in the working dir
935 # be called for every dir in the working dir
923 full = listclean or match.traversedir is not None
936 full = listclean or match.traversedir is not None
924 for fn, st in self.walk(match, subrepos, listunknown, listignored,
937 for fn, st in self.walk(match, subrepos, listunknown, listignored,
925 full=full).iteritems():
938 full=full).iteritems():
926 if fn not in dmap:
939 if fn not in dmap:
927 if (listignored or mexact(fn)) and dirignore(fn):
940 if (listignored or mexact(fn)) and dirignore(fn):
928 if listignored:
941 if listignored:
929 iadd(fn)
942 iadd(fn)
930 else:
943 else:
931 uadd(fn)
944 uadd(fn)
932 continue
945 continue
933
946
934 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
947 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
935 # written like that for performance reasons. dmap[fn] is not a
948 # written like that for performance reasons. dmap[fn] is not a
936 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
949 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
937 # opcode has fast paths when the value to be unpacked is a tuple or
950 # opcode has fast paths when the value to be unpacked is a tuple or
938 # a list, but falls back to creating a full-fledged iterator in
951 # a list, but falls back to creating a full-fledged iterator in
939 # general. That is much slower than simply accessing and storing the
952 # general. That is much slower than simply accessing and storing the
940 # tuple members one by one.
953 # tuple members one by one.
941 t = dmap[fn]
954 t = dmap[fn]
942 state = t[0]
955 state = t[0]
943 mode = t[1]
956 mode = t[1]
944 size = t[2]
957 size = t[2]
945 time = t[3]
958 time = t[3]
946
959
947 if not st and state in "nma":
960 if not st and state in "nma":
948 dadd(fn)
961 dadd(fn)
949 elif state == 'n':
962 elif state == 'n':
950 mtime = int(st.st_mtime)
963 mtime = int(st.st_mtime)
951 if (size >= 0 and
964 if (size >= 0 and
952 ((size != st.st_size and size != st.st_size & _rangemask)
965 ((size != st.st_size and size != st.st_size & _rangemask)
953 or ((mode ^ st.st_mode) & 0100 and checkexec))
966 or ((mode ^ st.st_mode) & 0100 and checkexec))
954 or size == -2 # other parent
967 or size == -2 # other parent
955 or fn in copymap):
968 or fn in copymap):
956 madd(fn)
969 madd(fn)
957 elif time != mtime and time != mtime & _rangemask:
970 elif time != mtime and time != mtime & _rangemask:
958 ladd(fn)
971 ladd(fn)
959 elif mtime == lastnormaltime:
972 elif mtime == lastnormaltime:
960 # fn may have just been marked as normal and it may have
973 # fn may have just been marked as normal and it may have
961 # changed in the same second without changing its size.
974 # changed in the same second without changing its size.
962 # This can happen if we quickly do multiple commits.
975 # This can happen if we quickly do multiple commits.
963 # Force lookup, so we don't miss such a racy file change.
976 # Force lookup, so we don't miss such a racy file change.
964 ladd(fn)
977 ladd(fn)
965 elif listclean:
978 elif listclean:
966 cadd(fn)
979 cadd(fn)
967 elif state == 'm':
980 elif state == 'm':
968 madd(fn)
981 madd(fn)
969 elif state == 'a':
982 elif state == 'a':
970 aadd(fn)
983 aadd(fn)
971 elif state == 'r':
984 elif state == 'r':
972 radd(fn)
985 radd(fn)
973
986
974 return (lookup, scmutil.status(modified, added, removed, deleted,
987 return (lookup, scmutil.status(modified, added, removed, deleted,
975 unknown, ignored, clean))
988 unknown, ignored, clean))
976
989
977 def matches(self, match):
990 def matches(self, match):
978 '''
991 '''
979 return files in the dirstate (in whatever state) filtered by match
992 return files in the dirstate (in whatever state) filtered by match
980 '''
993 '''
981 dmap = self._map
994 dmap = self._map
982 if match.always():
995 if match.always():
983 return dmap.keys()
996 return dmap.keys()
984 files = match.files()
997 files = match.files()
985 if match.isexact():
998 if match.isexact():
986 # fast path -- filter the other way around, since typically files is
999 # fast path -- filter the other way around, since typically files is
987 # much smaller than dmap
1000 # much smaller than dmap
988 return [f for f in files if f in dmap]
1001 return [f for f in files if f in dmap]
989 if match.prefix() and all(fn in dmap for fn in files):
1002 if match.prefix() and all(fn in dmap for fn in files):
990 # fast path -- all the values are known to be files, so just return
1003 # fast path -- all the values are known to be files, so just return
991 # that
1004 # that
992 return list(files)
1005 return list(files)
993 return [f for f in dmap if match(f)]
1006 return [f for f in dmap if match(f)]
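
The new hunk in _read() above is the heart of this revision: before parse_dirstate fills the map, self._map is replaced with a dict presized from an estimate of the number of entries. The divisor is 71 rather than the measured 85 bytes per entry, which overestimates the entry count by roughly 20% so the dict never has to grow while it is being filled; for a 1 MiB dirstate (1,048,576 bytes) that presizes about 14,768 slots instead of letting an empty dict resize its way up to the ~12,300 entries that 85 bytes per entry implies. A minimal sketch of the guarded presize, assuming a build of Mercurial whose C parsers module provides dict_new_presized:

from mercurial import parsers, util

def presizedmap(data):
    # data is the raw dirstate file contents, as read in _read() above
    if util.safehasattr(parsers, 'dict_new_presized'):
        # ~85 bytes per entry measured on large real-world repos; dividing
        # by 71 deliberately overshoots by ~20% so the dict is never
        # resized while parse_dirstate inserts into it.
        return parsers.dict_new_presized(len(data) // 71)
    # pure-Python or older builds without the helper: fall back to an
    # ordinary dict, which resizes itself as entries are inserted
    return {}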
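
The long comment in _read() about the garbage collector explains why parsing runs under util.nogc: parse_dirstate allocates one tuple per tracked file, every allocation counts toward the collector's generation-0 threshold, and so O(number of files) collections fire even though the C code immediately untracks the tuples. Below is a minimal sketch of the same workaround built on the standard gc module; the real decorator is Mercurial's util.nogc.

import functools
import gc

def nogc(func):
    # Run func with the cyclic garbage collector paused. Allocation still
    # happens, but no collection is triggered mid-parse, avoiding the
    # O(number of files) collections described in the comment above.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if wasenabled:
                gc.enable()
    return wrapper

# usage mirroring _read(): parse_dirstate = nogc(parsers.parse_dirstate)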
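
_pl, further up, relies on the on-disk dirstate layout: the file starts with the two parent changesets as raw 20-byte binary nodes, so the first 40 bytes split cleanly into p1 and p2, a shorter non-empty read means the file is damaged, and a missing or empty file means both parents are null. A small sketch of that header parse, assuming data holds the raw dirstate bytes:

nullid = b"\0" * 20  # same value as mercurial.node.nullid

def dirstateparents(data):
    # Sketch of the _pl logic above: the dirstate begins with two raw
    # 20-byte SHA-1 nodes identifying the working copy's parents.
    header = data[:40]
    if len(header) == 40:
        return header[:20], header[20:40]
    if header:
        # a partial header can only mean truncation or corruption;
        # raising here stands in for the util.Abort in the real code
        raise ValueError('working directory state appears damaged!')
    return nullid, nullid  # no data: treat as an unborn working copy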
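
The comment near the top of the status() loop concerns dirstatetuple entries, which are not real Python tuples in compiled builds: sequence unpacking of such an object falls back to CPython's generic iterator path instead of the fast tuple/list cases of UNPACK_SEQUENCE, so the loop reads the four members by index. A small illustration of the two styles, using an ordinary tuple as a hypothetical stand-in for a dirstate entry:

entry = ('n', 0o644, 1024, 1431000000)  # hypothetical (state, mode, size, mtime)

# generic unpacking: fast only when the right-hand side is a real tuple or list
state, mode, size, mtime = entry

# member-by-member access, as written in status(): plain indexing calls
# that behave the same for any sequence type, including the C dirstatetuple
t = entry
state = t[0]
mode = t[1]
size = t[2]
mtime = t[3]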
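
_rangemask = 0x7fffffff at the top of the file exists because the on-disk dirstate stores size and mtime in 32-bit signed fields: normal() masks both values to 31 bits before recording them, and status() therefore treats the size as unchanged when the recorded value matches either the raw st_size or its masked form. A tiny worked example, assuming a hypothetical file larger than 2 GiB:

_rangemask = 0x7fffffff  # 31 bits, as defined at the top of dirstate.py

st_size = 3 * 1024 ** 3            # 3 GiB file reported by lstat
stored = st_size & _rangemask      # what normal() records: 1073741824

# the size comparison in status(): modified only if the stored size matches
# neither the raw size nor the masked size
modified = stored != st_size and stored != (st_size & _rangemask)
assert not modified  # the masked comparison keeps >2 GiB files from looking dirty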