dirstate: fix debug.dirstate.delaywrite to use the new "now" after sleeping...
Mads Kiilerich
r30224:ad56071b stable
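The fix below is in _writedirstate(): when debug.dirstate.delaywrite is set and some 'normal' entry carries the current timestamp, the writer sleeps until the next multiple of 'delaywrite' seconds, and the new line 760 makes it adopt that boundary as the new "now" instead of keeping the stale pre-sleep value for the following pack_dirstate() call. A minimal standalone sketch of the sleep-to-boundary arithmetic (not Mercurial's actual helper, just the same rounding):

import time

def sleep_to_next_boundary(delaywrite):
    # sleep until the next multiple of 'delaywrite' seconds and return
    # that boundary, which the caller should then treat as "now"
    clock = time.time()
    start = int(clock) - (int(clock) % delaywrite)
    end = start + delaywrite
    time.sleep(end - clock)
    return end  # e.g. clock=1000.3, delaywrite=2: sleeps 1.7s, returns 1002
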
@@ -1,1258 +1,1259
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import errno
11 import errno
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import nullid
16 from .node import nullid
17 from . import (
17 from . import (
18 encoding,
18 encoding,
19 error,
19 error,
20 match as matchmod,
20 match as matchmod,
21 osutil,
21 osutil,
22 parsers,
22 parsers,
23 pathutil,
23 pathutil,
24 scmutil,
24 scmutil,
25 util,
25 util,
26 )
26 )
27
27
28 propertycache = util.propertycache
28 propertycache = util.propertycache
29 filecache = scmutil.filecache
29 filecache = scmutil.filecache
30 _rangemask = 0x7fffffff
30 _rangemask = 0x7fffffff
31
31
32 dirstatetuple = parsers.dirstatetuple
32 dirstatetuple = parsers.dirstatetuple
33
33
34 class repocache(filecache):
34 class repocache(filecache):
35 """filecache for files in .hg/"""
35 """filecache for files in .hg/"""
36 def join(self, obj, fname):
36 def join(self, obj, fname):
37 return obj._opener.join(fname)
37 return obj._opener.join(fname)
38
38
39 class rootcache(filecache):
39 class rootcache(filecache):
40 """filecache for files in the repository root"""
40 """filecache for files in the repository root"""
41 def join(self, obj, fname):
41 def join(self, obj, fname):
42 return obj._join(fname)
42 return obj._join(fname)
43
43
44 def _getfsnow(vfs):
44 def _getfsnow(vfs):
45 '''Get "now" timestamp on filesystem'''
45 '''Get "now" timestamp on filesystem'''
46 tmpfd, tmpname = vfs.mkstemp()
46 tmpfd, tmpname = vfs.mkstemp()
47 try:
47 try:
48 return os.fstat(tmpfd).st_mtime
48 return os.fstat(tmpfd).st_mtime
49 finally:
49 finally:
50 os.close(tmpfd)
50 os.close(tmpfd)
51 vfs.unlink(tmpname)
51 vfs.unlink(tmpname)
52
52
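_getfsnow() above takes "now" from the filesystem itself rather than from the system clock: it creates a temporary file through the vfs and reads that file's mtime, so later comparisons against tracked files' mtimes use the same clock and granularity. A rough standalone equivalent using plain os/tempfile (illustrative only; the real code goes through Mercurial's vfs):

import os
import tempfile

def getfsnow(directory):
    # "now" as the filesystem reports it: the mtime of a file just created there
    fd, name = tempfile.mkstemp(dir=directory)
    try:
        return os.fstat(fd).st_mtime
    finally:
        os.close(fd)
        os.unlink(name)
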
53 def nonnormalentries(dmap):
53 def nonnormalentries(dmap):
54 '''Compute the nonnormal dirstate entries from the dmap'''
54 '''Compute the nonnormal dirstate entries from the dmap'''
55 try:
55 try:
56 return parsers.nonnormalentries(dmap)
56 return parsers.nonnormalentries(dmap)
57 except AttributeError:
57 except AttributeError:
58 return set(fname for fname, e in dmap.iteritems()
58 return set(fname for fname, e in dmap.iteritems()
59 if e[0] != 'n' or e[3] == -1)
59 if e[0] != 'n' or e[3] == -1)
60
60
61 def _trypending(root, vfs, filename):
61 def _trypending(root, vfs, filename):
62 '''Open file to be read according to HG_PENDING environment variable
62 '''Open file to be read according to HG_PENDING environment variable
63
63
64 This opens the '.pending' variant of the specified 'filename' only
64 This opens the '.pending' variant of the specified 'filename' only
65 when HG_PENDING is equal to 'root'.
65 when HG_PENDING is equal to 'root'.
66
66
67 This returns '(fp, is_pending_opened)' tuple.
67 This returns '(fp, is_pending_opened)' tuple.
68 '''
68 '''
69 if root == os.environ.get('HG_PENDING'):
69 if root == os.environ.get('HG_PENDING'):
70 try:
70 try:
71 return (vfs('%s.pending' % filename), True)
71 return (vfs('%s.pending' % filename), True)
72 except IOError as inst:
72 except IOError as inst:
73 if inst.errno != errno.ENOENT:
73 if inst.errno != errno.ENOENT:
74 raise
74 raise
75 return (vfs(filename), False)
75 return (vfs(filename), False)
76
76
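_trypending() gives readers spawned during a transaction (hooks, for example) the transaction's pending view: when the HG_PENDING environment variable names this repository root, '<filename>.pending' is opened instead of '<filename>', and the boolean in the returned tuple says which one was used. A hedged sketch of the same decision with plain open(), assuming the file lives under .hg/ as the dirstate does:

import errno
import os

def read_pending_aware(root, filename):
    # prefer '<filename>.pending' when HG_PENDING points at this repo root
    if os.environ.get('HG_PENDING') == root:
        try:
            return open(os.path.join(root, '.hg', filename + '.pending'), 'rb'), True
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
    return open(os.path.join(root, '.hg', filename), 'rb'), False
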
77 class dirstate(object):
77 class dirstate(object):
78
78
79 def __init__(self, opener, ui, root, validate):
79 def __init__(self, opener, ui, root, validate):
80 '''Create a new dirstate object.
80 '''Create a new dirstate object.
81
81
82 opener is an open()-like callable that can be used to open the
82 opener is an open()-like callable that can be used to open the
83 dirstate file; root is the root of the directory tracked by
83 dirstate file; root is the root of the directory tracked by
84 the dirstate.
84 the dirstate.
85 '''
85 '''
86 self._opener = opener
86 self._opener = opener
87 self._validate = validate
87 self._validate = validate
88 self._root = root
88 self._root = root
89 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is a
89 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is a
90 # UNC path pointing to a root share (issue4557)
90 # UNC path pointing to a root share (issue4557)
91 self._rootdir = pathutil.normasprefix(root)
91 self._rootdir = pathutil.normasprefix(root)
92 # internal config: ui.forcecwd
92 # internal config: ui.forcecwd
93 forcecwd = ui.config('ui', 'forcecwd')
93 forcecwd = ui.config('ui', 'forcecwd')
94 if forcecwd:
94 if forcecwd:
95 self._cwd = forcecwd
95 self._cwd = forcecwd
96 self._dirty = False
96 self._dirty = False
97 self._dirtypl = False
97 self._dirtypl = False
98 self._lastnormaltime = 0
98 self._lastnormaltime = 0
99 self._ui = ui
99 self._ui = ui
100 self._filecache = {}
100 self._filecache = {}
101 self._parentwriters = 0
101 self._parentwriters = 0
102 self._filename = 'dirstate'
102 self._filename = 'dirstate'
103 self._pendingfilename = '%s.pending' % self._filename
103 self._pendingfilename = '%s.pending' % self._filename
104 self._plchangecallbacks = {}
104 self._plchangecallbacks = {}
105 self._origpl = None
105 self._origpl = None
106
106
107 # for consistent view between _pl() and _read() invocations
107 # for consistent view between _pl() and _read() invocations
108 self._pendingmode = None
108 self._pendingmode = None
109
109
110 def beginparentchange(self):
110 def beginparentchange(self):
111 '''Marks the beginning of a set of changes that involve changing
111 '''Marks the beginning of a set of changes that involve changing
112 the dirstate parents. If there is an exception during this time,
112 the dirstate parents. If there is an exception during this time,
113 the dirstate will not be written when the wlock is released. This
113 the dirstate will not be written when the wlock is released. This
114 prevents writing an incoherent dirstate where the parent doesn't
114 prevents writing an incoherent dirstate where the parent doesn't
115 match the contents.
115 match the contents.
116 '''
116 '''
117 self._parentwriters += 1
117 self._parentwriters += 1
118
118
119 def endparentchange(self):
119 def endparentchange(self):
120 '''Marks the end of a set of changes that involve changing the
120 '''Marks the end of a set of changes that involve changing the
121 dirstate parents. Once all parent changes have been marked done,
121 dirstate parents. Once all parent changes have been marked done,
122 the wlock will be free to write the dirstate on release.
122 the wlock will be free to write the dirstate on release.
123 '''
123 '''
124 if self._parentwriters > 0:
124 if self._parentwriters > 0:
125 self._parentwriters -= 1
125 self._parentwriters -= 1
126
126
127 def pendingparentchange(self):
127 def pendingparentchange(self):
128 '''Returns true if the dirstate is in the middle of a set of changes
128 '''Returns true if the dirstate is in the middle of a set of changes
129 that modify the dirstate parent.
129 that modify the dirstate parent.
130 '''
130 '''
131 return self._parentwriters > 0
131 return self._parentwriters > 0
132
132
133 @propertycache
133 @propertycache
134 def _map(self):
134 def _map(self):
135 '''Return the dirstate contents as a map from filename to
135 '''Return the dirstate contents as a map from filename to
136 (state, mode, size, time).'''
136 (state, mode, size, time).'''
137 self._read()
137 self._read()
138 return self._map
138 return self._map
139
139
140 @propertycache
140 @propertycache
141 def _copymap(self):
141 def _copymap(self):
142 self._read()
142 self._read()
143 return self._copymap
143 return self._copymap
144
144
145 @propertycache
145 @propertycache
146 def _nonnormalset(self):
146 def _nonnormalset(self):
147 return nonnormalentries(self._map)
147 return nonnormalentries(self._map)
148
148
149 @propertycache
149 @propertycache
150 def _filefoldmap(self):
150 def _filefoldmap(self):
151 try:
151 try:
152 makefilefoldmap = parsers.make_file_foldmap
152 makefilefoldmap = parsers.make_file_foldmap
153 except AttributeError:
153 except AttributeError:
154 pass
154 pass
155 else:
155 else:
156 return makefilefoldmap(self._map, util.normcasespec,
156 return makefilefoldmap(self._map, util.normcasespec,
157 util.normcasefallback)
157 util.normcasefallback)
158
158
159 f = {}
159 f = {}
160 normcase = util.normcase
160 normcase = util.normcase
161 for name, s in self._map.iteritems():
161 for name, s in self._map.iteritems():
162 if s[0] != 'r':
162 if s[0] != 'r':
163 f[normcase(name)] = name
163 f[normcase(name)] = name
164 f['.'] = '.' # prevents useless util.fspath() invocation
164 f['.'] = '.' # prevents useless util.fspath() invocation
165 return f
165 return f
166
166
167 @propertycache
167 @propertycache
168 def _dirfoldmap(self):
168 def _dirfoldmap(self):
169 f = {}
169 f = {}
170 normcase = util.normcase
170 normcase = util.normcase
171 for name in self._dirs:
171 for name in self._dirs:
172 f[normcase(name)] = name
172 f[normcase(name)] = name
173 return f
173 return f
174
174
175 @repocache('branch')
175 @repocache('branch')
176 def _branch(self):
176 def _branch(self):
177 try:
177 try:
178 return self._opener.read("branch").strip() or "default"
178 return self._opener.read("branch").strip() or "default"
179 except IOError as inst:
179 except IOError as inst:
180 if inst.errno != errno.ENOENT:
180 if inst.errno != errno.ENOENT:
181 raise
181 raise
182 return "default"
182 return "default"
183
183
184 @propertycache
184 @propertycache
185 def _pl(self):
185 def _pl(self):
186 try:
186 try:
187 fp = self._opendirstatefile()
187 fp = self._opendirstatefile()
188 st = fp.read(40)
188 st = fp.read(40)
189 fp.close()
189 fp.close()
190 l = len(st)
190 l = len(st)
191 if l == 40:
191 if l == 40:
192 return st[:20], st[20:40]
192 return st[:20], st[20:40]
193 elif l > 0 and l < 40:
193 elif l > 0 and l < 40:
194 raise error.Abort(_('working directory state appears damaged!'))
194 raise error.Abort(_('working directory state appears damaged!'))
195 except IOError as err:
195 except IOError as err:
196 if err.errno != errno.ENOENT:
196 if err.errno != errno.ENOENT:
197 raise
197 raise
198 return [nullid, nullid]
198 return [nullid, nullid]
199
199
200 @propertycache
200 @propertycache
201 def _dirs(self):
201 def _dirs(self):
202 return util.dirs(self._map, 'r')
202 return util.dirs(self._map, 'r')
203
203
204 def dirs(self):
204 def dirs(self):
205 return self._dirs
205 return self._dirs
206
206
207 @rootcache('.hgignore')
207 @rootcache('.hgignore')
208 def _ignore(self):
208 def _ignore(self):
209 files = self._ignorefiles()
209 files = self._ignorefiles()
210 if not files:
210 if not files:
211 return util.never
211 return util.never
212
212
213 pats = ['include:%s' % f for f in files]
213 pats = ['include:%s' % f for f in files]
214 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
214 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
215
215
216 @propertycache
216 @propertycache
217 def _slash(self):
217 def _slash(self):
218 return self._ui.configbool('ui', 'slash') and os.sep != '/'
218 return self._ui.configbool('ui', 'slash') and os.sep != '/'
219
219
220 @propertycache
220 @propertycache
221 def _checklink(self):
221 def _checklink(self):
222 return util.checklink(self._root)
222 return util.checklink(self._root)
223
223
224 @propertycache
224 @propertycache
225 def _checkexec(self):
225 def _checkexec(self):
226 return util.checkexec(self._root)
226 return util.checkexec(self._root)
227
227
228 @propertycache
228 @propertycache
229 def _checkcase(self):
229 def _checkcase(self):
230 return not util.fscasesensitive(self._join('.hg'))
230 return not util.fscasesensitive(self._join('.hg'))
231
231
232 def _join(self, f):
232 def _join(self, f):
233 # much faster than os.path.join()
233 # much faster than os.path.join()
234 # it's safe because f is always a relative path
234 # it's safe because f is always a relative path
235 return self._rootdir + f
235 return self._rootdir + f
236
236
237 def flagfunc(self, buildfallback):
237 def flagfunc(self, buildfallback):
238 if self._checklink and self._checkexec:
238 if self._checklink and self._checkexec:
239 def f(x):
239 def f(x):
240 try:
240 try:
241 st = os.lstat(self._join(x))
241 st = os.lstat(self._join(x))
242 if util.statislink(st):
242 if util.statislink(st):
243 return 'l'
243 return 'l'
244 if util.statisexec(st):
244 if util.statisexec(st):
245 return 'x'
245 return 'x'
246 except OSError:
246 except OSError:
247 pass
247 pass
248 return ''
248 return ''
249 return f
249 return f
250
250
251 fallback = buildfallback()
251 fallback = buildfallback()
252 if self._checklink:
252 if self._checklink:
253 def f(x):
253 def f(x):
254 if os.path.islink(self._join(x)):
254 if os.path.islink(self._join(x)):
255 return 'l'
255 return 'l'
256 if 'x' in fallback(x):
256 if 'x' in fallback(x):
257 return 'x'
257 return 'x'
258 return ''
258 return ''
259 return f
259 return f
260 if self._checkexec:
260 if self._checkexec:
261 def f(x):
261 def f(x):
262 if 'l' in fallback(x):
262 if 'l' in fallback(x):
263 return 'l'
263 return 'l'
264 if util.isexec(self._join(x)):
264 if util.isexec(self._join(x)):
265 return 'x'
265 return 'x'
266 return ''
266 return ''
267 return f
267 return f
268 else:
268 else:
269 return fallback
269 return fallback
270
270
271 @propertycache
271 @propertycache
272 def _cwd(self):
272 def _cwd(self):
273 return os.getcwd()
273 return os.getcwd()
274
274
275 def getcwd(self):
275 def getcwd(self):
276 '''Return the path from which a canonical path is calculated.
276 '''Return the path from which a canonical path is calculated.
277
277
278 This path should be used to resolve file patterns or to convert
278 This path should be used to resolve file patterns or to convert
279 canonical paths back to file paths for display. It shouldn't be
279 canonical paths back to file paths for display. It shouldn't be
280 used to get real file paths. Use vfs functions instead.
280 used to get real file paths. Use vfs functions instead.
281 '''
281 '''
282 cwd = self._cwd
282 cwd = self._cwd
283 if cwd == self._root:
283 if cwd == self._root:
284 return ''
284 return ''
285 # self._root ends with a path separator if self._root is '/' or 'C:\'
285 # self._root ends with a path separator if self._root is '/' or 'C:\'
286 rootsep = self._root
286 rootsep = self._root
287 if not util.endswithsep(rootsep):
287 if not util.endswithsep(rootsep):
288 rootsep += os.sep
288 rootsep += os.sep
289 if cwd.startswith(rootsep):
289 if cwd.startswith(rootsep):
290 return cwd[len(rootsep):]
290 return cwd[len(rootsep):]
291 else:
291 else:
292 # we're outside the repo. return an absolute path.
292 # we're outside the repo. return an absolute path.
293 return cwd
293 return cwd
294
294
295 def pathto(self, f, cwd=None):
295 def pathto(self, f, cwd=None):
296 if cwd is None:
296 if cwd is None:
297 cwd = self.getcwd()
297 cwd = self.getcwd()
298 path = util.pathto(self._root, cwd, f)
298 path = util.pathto(self._root, cwd, f)
299 if self._slash:
299 if self._slash:
300 return util.pconvert(path)
300 return util.pconvert(path)
301 return path
301 return path
302
302
303 def __getitem__(self, key):
303 def __getitem__(self, key):
304 '''Return the current state of key (a filename) in the dirstate.
304 '''Return the current state of key (a filename) in the dirstate.
305
305
306 States are:
306 States are:
307 n normal
307 n normal
308 m needs merging
308 m needs merging
309 r marked for removal
309 r marked for removal
310 a marked for addition
310 a marked for addition
311 ? not tracked
311 ? not tracked
312 '''
312 '''
313 return self._map.get(key, ("?",))[0]
313 return self._map.get(key, ("?",))[0]
314
314
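Indexing the dirstate therefore yields a one-character state, with '?' as the default for untracked paths. A small illustrative use; 'repo' here is a hypothetical repository object standing in for wherever the dirstate instance comes from:

state = repo.dirstate['setup.py']   # one of 'n', 'a', 'r', 'm', '?'
if state == '?':
    print('setup.py is not tracked')
elif state in 'ar':
    print('setup.py is scheduled for addition or removal')
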
315 def __contains__(self, key):
315 def __contains__(self, key):
316 return key in self._map
316 return key in self._map
317
317
318 def __iter__(self):
318 def __iter__(self):
319 for x in sorted(self._map):
319 for x in sorted(self._map):
320 yield x
320 yield x
321
321
322 def iteritems(self):
322 def iteritems(self):
323 return self._map.iteritems()
323 return self._map.iteritems()
324
324
325 def parents(self):
325 def parents(self):
326 return [self._validate(p) for p in self._pl]
326 return [self._validate(p) for p in self._pl]
327
327
328 def p1(self):
328 def p1(self):
329 return self._validate(self._pl[0])
329 return self._validate(self._pl[0])
330
330
331 def p2(self):
331 def p2(self):
332 return self._validate(self._pl[1])
332 return self._validate(self._pl[1])
333
333
334 def branch(self):
334 def branch(self):
335 return encoding.tolocal(self._branch)
335 return encoding.tolocal(self._branch)
336
336
337 def setparents(self, p1, p2=nullid):
337 def setparents(self, p1, p2=nullid):
338 """Set dirstate parents to p1 and p2.
338 """Set dirstate parents to p1 and p2.
339
339
340 When moving from two parents to one, 'm' merged entries are
340 When moving from two parents to one, 'm' merged entries are
341 adjusted to normal and previous copy records are discarded and
341 adjusted to normal and previous copy records are discarded and
342 returned by the call.
342 returned by the call.
343
343
344 See localrepo.setparents()
344 See localrepo.setparents()
345 """
345 """
346 if self._parentwriters == 0:
346 if self._parentwriters == 0:
347 raise ValueError("cannot set dirstate parent without "
347 raise ValueError("cannot set dirstate parent without "
348 "calling dirstate.beginparentchange")
348 "calling dirstate.beginparentchange")
349
349
350 self._dirty = self._dirtypl = True
350 self._dirty = self._dirtypl = True
351 oldp2 = self._pl[1]
351 oldp2 = self._pl[1]
352 if self._origpl is None:
352 if self._origpl is None:
353 self._origpl = self._pl
353 self._origpl = self._pl
354 self._pl = p1, p2
354 self._pl = p1, p2
355 copies = {}
355 copies = {}
356 if oldp2 != nullid and p2 == nullid:
356 if oldp2 != nullid and p2 == nullid:
357 for f, s in self._map.iteritems():
357 for f, s in self._map.iteritems():
358 # Discard 'm' markers when moving away from a merge state
358 # Discard 'm' markers when moving away from a merge state
359 if s[0] == 'm':
359 if s[0] == 'm':
360 if f in self._copymap:
360 if f in self._copymap:
361 copies[f] = self._copymap[f]
361 copies[f] = self._copymap[f]
362 self.normallookup(f)
362 self.normallookup(f)
363 # Also fix up otherparent markers
363 # Also fix up otherparent markers
364 elif s[0] == 'n' and s[2] == -2:
364 elif s[0] == 'n' and s[2] == -2:
365 if f in self._copymap:
365 if f in self._copymap:
366 copies[f] = self._copymap[f]
366 copies[f] = self._copymap[f]
367 self.add(f)
367 self.add(f)
368 return copies
368 return copies
369
369
370 def setbranch(self, branch):
370 def setbranch(self, branch):
371 self._branch = encoding.fromlocal(branch)
371 self._branch = encoding.fromlocal(branch)
372 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
372 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
373 try:
373 try:
374 f.write(self._branch + '\n')
374 f.write(self._branch + '\n')
375 f.close()
375 f.close()
376
376
377 # make sure filecache has the correct stat info for _branch after
377 # make sure filecache has the correct stat info for _branch after
378 # replacing the underlying file
378 # replacing the underlying file
379 ce = self._filecache['_branch']
379 ce = self._filecache['_branch']
380 if ce:
380 if ce:
381 ce.refresh()
381 ce.refresh()
382 except: # re-raises
382 except: # re-raises
383 f.discard()
383 f.discard()
384 raise
384 raise
385
385
386 def _opendirstatefile(self):
386 def _opendirstatefile(self):
387 fp, mode = _trypending(self._root, self._opener, self._filename)
387 fp, mode = _trypending(self._root, self._opener, self._filename)
388 if self._pendingmode is not None and self._pendingmode != mode:
388 if self._pendingmode is not None and self._pendingmode != mode:
389 fp.close()
389 fp.close()
390 raise error.Abort(_('working directory state may be '
390 raise error.Abort(_('working directory state may be '
391 'changed in parallel'))
391 'changed in parallel'))
392 self._pendingmode = mode
392 self._pendingmode = mode
393 return fp
393 return fp
394
394
395 def _read(self):
395 def _read(self):
396 self._map = {}
396 self._map = {}
397 self._copymap = {}
397 self._copymap = {}
398 try:
398 try:
399 fp = self._opendirstatefile()
399 fp = self._opendirstatefile()
400 try:
400 try:
401 st = fp.read()
401 st = fp.read()
402 finally:
402 finally:
403 fp.close()
403 fp.close()
404 except IOError as err:
404 except IOError as err:
405 if err.errno != errno.ENOENT:
405 if err.errno != errno.ENOENT:
406 raise
406 raise
407 return
407 return
408 if not st:
408 if not st:
409 return
409 return
410
410
411 if util.safehasattr(parsers, 'dict_new_presized'):
411 if util.safehasattr(parsers, 'dict_new_presized'):
412 # Make an estimate of the number of files in the dirstate based on
412 # Make an estimate of the number of files in the dirstate based on
413 # its size. From a linear regression on a set of real-world repos,
413 # its size. From a linear regression on a set of real-world repos,
414 # all over 10,000 files, the size of a dirstate entry is 85
414 # all over 10,000 files, the size of a dirstate entry is 85
415 # bytes. The cost of resizing is significantly higher than the cost
415 # bytes. The cost of resizing is significantly higher than the cost
416 # of filling in a larger presized dict, so subtract 20% from the
416 # of filling in a larger presized dict, so subtract 20% from the
417 # size.
417 # size.
418 #
418 #
419 # This heuristic is imperfect in many ways, so in a future dirstate
419 # This heuristic is imperfect in many ways, so in a future dirstate
420 # format update it makes sense to just record the number of entries
420 # format update it makes sense to just record the number of entries
421 # on write.
421 # on write.
422 self._map = parsers.dict_new_presized(len(st) / 71)
422 self._map = parsers.dict_new_presized(len(st) / 71)
423
423
424 # Python's garbage collector triggers a GC each time a certain number
424 # Python's garbage collector triggers a GC each time a certain number
425 # of container objects (the number being defined by
425 # of container objects (the number being defined by
426 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
426 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
427 # for each file in the dirstate. The C version then immediately marks
427 # for each file in the dirstate. The C version then immediately marks
428 # them as not to be tracked by the collector. However, this has no
428 # them as not to be tracked by the collector. However, this has no
429 # effect on when GCs are triggered, only on what objects the GC looks
429 # effect on when GCs are triggered, only on what objects the GC looks
430 # into. This means that O(number of files) GCs are unavoidable.
430 # into. This means that O(number of files) GCs are unavoidable.
431 # Depending on when in the process's lifetime the dirstate is parsed,
431 # Depending on when in the process's lifetime the dirstate is parsed,
432 # this can get very expensive. As a workaround, disable GC while
432 # this can get very expensive. As a workaround, disable GC while
433 # parsing the dirstate.
433 # parsing the dirstate.
434 #
434 #
435 # (we cannot decorate the function directly since it is in a C module)
435 # (we cannot decorate the function directly since it is in a C module)
436 parse_dirstate = util.nogc(parsers.parse_dirstate)
436 parse_dirstate = util.nogc(parsers.parse_dirstate)
437 p = parse_dirstate(self._map, self._copymap, st)
437 p = parse_dirstate(self._map, self._copymap, st)
438 if not self._dirtypl:
438 if not self._dirtypl:
439 self._pl = p
439 self._pl = p
440
440
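About the presizing heuristic in _read() above: dividing the raw file size by 71 rather than by the measured ~85 bytes per entry over-estimates the entry count by roughly 20%, so the dict starts out with headroom and never pays for a resize. A tiny worked example of that arithmetic (illustrative numbers):

st_len = 1024 * 1024             # a 1 MiB dirstate file
entries_expected = st_len / 85   # ~12336 entries actually in the file
presized_slots = st_len / 71     # ~14768 slots, about 20% headroom
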
441 def invalidate(self):
441 def invalidate(self):
442 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
442 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
443 "_pl", "_dirs", "_ignore", "_nonnormalset"):
443 "_pl", "_dirs", "_ignore", "_nonnormalset"):
444 if a in self.__dict__:
444 if a in self.__dict__:
445 delattr(self, a)
445 delattr(self, a)
446 self._lastnormaltime = 0
446 self._lastnormaltime = 0
447 self._dirty = False
447 self._dirty = False
448 self._parentwriters = 0
448 self._parentwriters = 0
449 self._origpl = None
449 self._origpl = None
450
450
451 def copy(self, source, dest):
451 def copy(self, source, dest):
452 """Mark dest as a copy of source. Unmark dest if source is None."""
452 """Mark dest as a copy of source. Unmark dest if source is None."""
453 if source == dest:
453 if source == dest:
454 return
454 return
455 self._dirty = True
455 self._dirty = True
456 if source is not None:
456 if source is not None:
457 self._copymap[dest] = source
457 self._copymap[dest] = source
458 elif dest in self._copymap:
458 elif dest in self._copymap:
459 del self._copymap[dest]
459 del self._copymap[dest]
460
460
461 def copied(self, file):
461 def copied(self, file):
462 return self._copymap.get(file, None)
462 return self._copymap.get(file, None)
463
463
464 def copies(self):
464 def copies(self):
465 return self._copymap
465 return self._copymap
466
466
467 def _droppath(self, f):
467 def _droppath(self, f):
468 if self[f] not in "?r" and "_dirs" in self.__dict__:
468 if self[f] not in "?r" and "_dirs" in self.__dict__:
469 self._dirs.delpath(f)
469 self._dirs.delpath(f)
470
470
471 if "_filefoldmap" in self.__dict__:
471 if "_filefoldmap" in self.__dict__:
472 normed = util.normcase(f)
472 normed = util.normcase(f)
473 if normed in self._filefoldmap:
473 if normed in self._filefoldmap:
474 del self._filefoldmap[normed]
474 del self._filefoldmap[normed]
475
475
476 def _addpath(self, f, state, mode, size, mtime):
476 def _addpath(self, f, state, mode, size, mtime):
477 oldstate = self[f]
477 oldstate = self[f]
478 if state == 'a' or oldstate == 'r':
478 if state == 'a' or oldstate == 'r':
479 scmutil.checkfilename(f)
479 scmutil.checkfilename(f)
480 if f in self._dirs:
480 if f in self._dirs:
481 raise error.Abort(_('directory %r already in dirstate') % f)
481 raise error.Abort(_('directory %r already in dirstate') % f)
482 # shadows
482 # shadows
483 for d in util.finddirs(f):
483 for d in util.finddirs(f):
484 if d in self._dirs:
484 if d in self._dirs:
485 break
485 break
486 if d in self._map and self[d] != 'r':
486 if d in self._map and self[d] != 'r':
487 raise error.Abort(
487 raise error.Abort(
488 _('file %r in dirstate clashes with %r') % (d, f))
488 _('file %r in dirstate clashes with %r') % (d, f))
489 if oldstate in "?r" and "_dirs" in self.__dict__:
489 if oldstate in "?r" and "_dirs" in self.__dict__:
490 self._dirs.addpath(f)
490 self._dirs.addpath(f)
491 self._dirty = True
491 self._dirty = True
492 self._map[f] = dirstatetuple(state, mode, size, mtime)
492 self._map[f] = dirstatetuple(state, mode, size, mtime)
493 if state != 'n' or mtime == -1:
493 if state != 'n' or mtime == -1:
494 self._nonnormalset.add(f)
494 self._nonnormalset.add(f)
495
495
496 def normal(self, f):
496 def normal(self, f):
497 '''Mark a file normal and clean.'''
497 '''Mark a file normal and clean.'''
498 s = os.lstat(self._join(f))
498 s = os.lstat(self._join(f))
499 mtime = s.st_mtime
499 mtime = s.st_mtime
500 self._addpath(f, 'n', s.st_mode,
500 self._addpath(f, 'n', s.st_mode,
501 s.st_size & _rangemask, mtime & _rangemask)
501 s.st_size & _rangemask, mtime & _rangemask)
502 if f in self._copymap:
502 if f in self._copymap:
503 del self._copymap[f]
503 del self._copymap[f]
504 if f in self._nonnormalset:
504 if f in self._nonnormalset:
505 self._nonnormalset.remove(f)
505 self._nonnormalset.remove(f)
506 if mtime > self._lastnormaltime:
506 if mtime > self._lastnormaltime:
507 # Remember the most recent modification timeslot for status(),
507 # Remember the most recent modification timeslot for status(),
508 # to make sure we won't miss future size-preserving file content
508 # to make sure we won't miss future size-preserving file content
509 # modifications that happen within the same timeslot.
509 # modifications that happen within the same timeslot.
510 self._lastnormaltime = mtime
510 self._lastnormaltime = mtime
511
511
512 def normallookup(self, f):
512 def normallookup(self, f):
513 '''Mark a file normal, but possibly dirty.'''
513 '''Mark a file normal, but possibly dirty.'''
514 if self._pl[1] != nullid and f in self._map:
514 if self._pl[1] != nullid and f in self._map:
515 # if there is a merge going on and the file was either
515 # if there is a merge going on and the file was either
516 # in state 'm' (-1) or coming from other parent (-2) before
516 # in state 'm' (-1) or coming from other parent (-2) before
517 # being removed, restore that state.
517 # being removed, restore that state.
518 entry = self._map[f]
518 entry = self._map[f]
519 if entry[0] == 'r' and entry[2] in (-1, -2):
519 if entry[0] == 'r' and entry[2] in (-1, -2):
520 source = self._copymap.get(f)
520 source = self._copymap.get(f)
521 if entry[2] == -1:
521 if entry[2] == -1:
522 self.merge(f)
522 self.merge(f)
523 elif entry[2] == -2:
523 elif entry[2] == -2:
524 self.otherparent(f)
524 self.otherparent(f)
525 if source:
525 if source:
526 self.copy(source, f)
526 self.copy(source, f)
527 return
527 return
528 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
528 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
529 return
529 return
530 self._addpath(f, 'n', 0, -1, -1)
530 self._addpath(f, 'n', 0, -1, -1)
531 if f in self._copymap:
531 if f in self._copymap:
532 del self._copymap[f]
532 del self._copymap[f]
533 if f in self._nonnormalset:
533 if f in self._nonnormalset:
534 self._nonnormalset.remove(f)
534 self._nonnormalset.remove(f)
535
535
536 def otherparent(self, f):
536 def otherparent(self, f):
537 '''Mark as coming from the other parent, always dirty.'''
537 '''Mark as coming from the other parent, always dirty.'''
538 if self._pl[1] == nullid:
538 if self._pl[1] == nullid:
539 raise error.Abort(_("setting %r to other parent "
539 raise error.Abort(_("setting %r to other parent "
540 "only allowed in merges") % f)
540 "only allowed in merges") % f)
541 if f in self and self[f] == 'n':
541 if f in self and self[f] == 'n':
542 # merge-like
542 # merge-like
543 self._addpath(f, 'm', 0, -2, -1)
543 self._addpath(f, 'm', 0, -2, -1)
544 else:
544 else:
545 # add-like
545 # add-like
546 self._addpath(f, 'n', 0, -2, -1)
546 self._addpath(f, 'n', 0, -2, -1)
547
547
548 if f in self._copymap:
548 if f in self._copymap:
549 del self._copymap[f]
549 del self._copymap[f]
550
550
551 def add(self, f):
551 def add(self, f):
552 '''Mark a file added.'''
552 '''Mark a file added.'''
553 self._addpath(f, 'a', 0, -1, -1)
553 self._addpath(f, 'a', 0, -1, -1)
554 if f in self._copymap:
554 if f in self._copymap:
555 del self._copymap[f]
555 del self._copymap[f]
556
556
557 def remove(self, f):
557 def remove(self, f):
558 '''Mark a file removed.'''
558 '''Mark a file removed.'''
559 self._dirty = True
559 self._dirty = True
560 self._droppath(f)
560 self._droppath(f)
561 size = 0
561 size = 0
562 if self._pl[1] != nullid and f in self._map:
562 if self._pl[1] != nullid and f in self._map:
563 # backup the previous state
563 # backup the previous state
564 entry = self._map[f]
564 entry = self._map[f]
565 if entry[0] == 'm': # merge
565 if entry[0] == 'm': # merge
566 size = -1
566 size = -1
567 elif entry[0] == 'n' and entry[2] == -2: # other parent
567 elif entry[0] == 'n' and entry[2] == -2: # other parent
568 size = -2
568 size = -2
569 self._map[f] = dirstatetuple('r', 0, size, 0)
569 self._map[f] = dirstatetuple('r', 0, size, 0)
570 self._nonnormalset.add(f)
570 self._nonnormalset.add(f)
571 if size == 0 and f in self._copymap:
571 if size == 0 and f in self._copymap:
572 del self._copymap[f]
572 del self._copymap[f]
573
573
574 def merge(self, f):
574 def merge(self, f):
575 '''Mark a file merged.'''
575 '''Mark a file merged.'''
576 if self._pl[1] == nullid:
576 if self._pl[1] == nullid:
577 return self.normallookup(f)
577 return self.normallookup(f)
578 return self.otherparent(f)
578 return self.otherparent(f)
579
579
580 def drop(self, f):
580 def drop(self, f):
581 '''Drop a file from the dirstate'''
581 '''Drop a file from the dirstate'''
582 if f in self._map:
582 if f in self._map:
583 self._dirty = True
583 self._dirty = True
584 self._droppath(f)
584 self._droppath(f)
585 del self._map[f]
585 del self._map[f]
586 if f in self._nonnormalset:
586 if f in self._nonnormalset:
587 self._nonnormalset.remove(f)
587 self._nonnormalset.remove(f)
588 if f in self._copymap:
588 if f in self._copymap:
589 del self._copymap[f]
589 del self._copymap[f]
590
590
591 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
591 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
592 if exists is None:
592 if exists is None:
593 exists = os.path.lexists(os.path.join(self._root, path))
593 exists = os.path.lexists(os.path.join(self._root, path))
594 if not exists:
594 if not exists:
595 # Maybe a path component exists
595 # Maybe a path component exists
596 if not ignoremissing and '/' in path:
596 if not ignoremissing and '/' in path:
597 d, f = path.rsplit('/', 1)
597 d, f = path.rsplit('/', 1)
598 d = self._normalize(d, False, ignoremissing, None)
598 d = self._normalize(d, False, ignoremissing, None)
599 folded = d + "/" + f
599 folded = d + "/" + f
600 else:
600 else:
601 # No path components, preserve original case
601 # No path components, preserve original case
602 folded = path
602 folded = path
603 else:
603 else:
604 # recursively normalize leading directory components
604 # recursively normalize leading directory components
605 # against dirstate
605 # against dirstate
606 if '/' in normed:
606 if '/' in normed:
607 d, f = normed.rsplit('/', 1)
607 d, f = normed.rsplit('/', 1)
608 d = self._normalize(d, False, ignoremissing, True)
608 d = self._normalize(d, False, ignoremissing, True)
609 r = self._root + "/" + d
609 r = self._root + "/" + d
610 folded = d + "/" + util.fspath(f, r)
610 folded = d + "/" + util.fspath(f, r)
611 else:
611 else:
612 folded = util.fspath(normed, self._root)
612 folded = util.fspath(normed, self._root)
613 storemap[normed] = folded
613 storemap[normed] = folded
614
614
615 return folded
615 return folded
616
616
617 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
617 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
618 normed = util.normcase(path)
618 normed = util.normcase(path)
619 folded = self._filefoldmap.get(normed, None)
619 folded = self._filefoldmap.get(normed, None)
620 if folded is None:
620 if folded is None:
621 if isknown:
621 if isknown:
622 folded = path
622 folded = path
623 else:
623 else:
624 folded = self._discoverpath(path, normed, ignoremissing, exists,
624 folded = self._discoverpath(path, normed, ignoremissing, exists,
625 self._filefoldmap)
625 self._filefoldmap)
626 return folded
626 return folded
627
627
628 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
628 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
629 normed = util.normcase(path)
629 normed = util.normcase(path)
630 folded = self._filefoldmap.get(normed, None)
630 folded = self._filefoldmap.get(normed, None)
631 if folded is None:
631 if folded is None:
632 folded = self._dirfoldmap.get(normed, None)
632 folded = self._dirfoldmap.get(normed, None)
633 if folded is None:
633 if folded is None:
634 if isknown:
634 if isknown:
635 folded = path
635 folded = path
636 else:
636 else:
637 # store discovered result in dirfoldmap so that future
637 # store discovered result in dirfoldmap so that future
638 # normalizefile calls don't start matching directories
638 # normalizefile calls don't start matching directories
639 folded = self._discoverpath(path, normed, ignoremissing, exists,
639 folded = self._discoverpath(path, normed, ignoremissing, exists,
640 self._dirfoldmap)
640 self._dirfoldmap)
641 return folded
641 return folded
642
642
643 def normalize(self, path, isknown=False, ignoremissing=False):
643 def normalize(self, path, isknown=False, ignoremissing=False):
644 '''
644 '''
645 normalize the case of a pathname when on a casefolding filesystem
645 normalize the case of a pathname when on a casefolding filesystem
646
646
647 isknown specifies whether the filename came from walking the
647 isknown specifies whether the filename came from walking the
648 disk, to avoid extra filesystem access.
648 disk, to avoid extra filesystem access.
649
649
650 If ignoremissing is True, missing paths are returned
650 If ignoremissing is True, missing paths are returned
651 unchanged. Otherwise, we try harder to normalize possibly
651 unchanged. Otherwise, we try harder to normalize possibly
652 existing path components.
652 existing path components.
653
653
654 The normalized case is determined based on the following precedence:
654 The normalized case is determined based on the following precedence:
655
655
656 - version of name already stored in the dirstate
656 - version of name already stored in the dirstate
657 - version of name stored on disk
657 - version of name stored on disk
658 - version provided via command arguments
658 - version provided via command arguments
659 '''
659 '''
660
660
661 if self._checkcase:
661 if self._checkcase:
662 return self._normalize(path, isknown, ignoremissing)
662 return self._normalize(path, isknown, ignoremissing)
663 return path
663 return path
664
664
665 def clear(self):
665 def clear(self):
666 self._map = {}
666 self._map = {}
667 self._nonnormalset = set()
667 self._nonnormalset = set()
668 if "_dirs" in self.__dict__:
668 if "_dirs" in self.__dict__:
669 delattr(self, "_dirs")
669 delattr(self, "_dirs")
670 self._copymap = {}
670 self._copymap = {}
671 self._pl = [nullid, nullid]
671 self._pl = [nullid, nullid]
672 self._lastnormaltime = 0
672 self._lastnormaltime = 0
673 self._dirty = True
673 self._dirty = True
674
674
675 def rebuild(self, parent, allfiles, changedfiles=None):
675 def rebuild(self, parent, allfiles, changedfiles=None):
676 if changedfiles is None:
676 if changedfiles is None:
677 # Rebuild entire dirstate
677 # Rebuild entire dirstate
678 changedfiles = allfiles
678 changedfiles = allfiles
679 lastnormaltime = self._lastnormaltime
679 lastnormaltime = self._lastnormaltime
680 self.clear()
680 self.clear()
681 self._lastnormaltime = lastnormaltime
681 self._lastnormaltime = lastnormaltime
682
682
683 if self._origpl is None:
683 if self._origpl is None:
684 self._origpl = self._pl
684 self._origpl = self._pl
685 self._pl = (parent, nullid)
685 self._pl = (parent, nullid)
686 for f in changedfiles:
686 for f in changedfiles:
687 if f in allfiles:
687 if f in allfiles:
688 self.normallookup(f)
688 self.normallookup(f)
689 else:
689 else:
690 self.drop(f)
690 self.drop(f)
691
691
692 self._dirty = True
692 self._dirty = True
693
693
694 def write(self, tr):
694 def write(self, tr):
695 if not self._dirty:
695 if not self._dirty:
696 return
696 return
697
697
698 filename = self._filename
698 filename = self._filename
699 if tr:
699 if tr:
700 # 'dirstate.write()' is not only for writing in-memory
700 # 'dirstate.write()' is not only for writing in-memory
701 # changes out, but also for dropping ambiguous timestamps.
701 # changes out, but also for dropping ambiguous timestamps.
702 # delayed writing would re-introduce the "ambiguous timestamp issue".
702 # delayed writing would re-introduce the "ambiguous timestamp issue".
703 # See also the wiki page below for detail:
703 # See also the wiki page below for detail:
704 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
704 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
705
705
706 # emulate dropping timestamp in 'parsers.pack_dirstate'
706 # emulate dropping timestamp in 'parsers.pack_dirstate'
707 now = _getfsnow(self._opener)
707 now = _getfsnow(self._opener)
708 dmap = self._map
708 dmap = self._map
709 for f, e in dmap.iteritems():
709 for f, e in dmap.iteritems():
710 if e[0] == 'n' and e[3] == now:
710 if e[0] == 'n' and e[3] == now:
711 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
711 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
712 self._nonnormalset.add(f)
712 self._nonnormalset.add(f)
713
713
714 # emulate that all 'dirstate.normal' results are written out
714 # emulate that all 'dirstate.normal' results are written out
715 self._lastnormaltime = 0
715 self._lastnormaltime = 0
716
716
717 # delay writing in-memory changes out
717 # delay writing in-memory changes out
718 tr.addfilegenerator('dirstate', (self._filename,),
718 tr.addfilegenerator('dirstate', (self._filename,),
719 self._writedirstate, location='plain')
719 self._writedirstate, location='plain')
720 return
720 return
721
721
722 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
722 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
723 self._writedirstate(st)
723 self._writedirstate(st)
724
724
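The loop in write() above clears the recorded mtime of any 'normal' entry whose timestamp equals the filesystem's current second. This guards against the ambiguous-timestamp race: a file can be rewritten with the same size within the same second, and mtime+size comparison alone would then miss the change, so such entries are pushed into the lookup set instead. A small sketch of the race and the countermeasure, with hypothetical values:

# filesystem with 1-second mtime resolution, current second = 1000
# t=1000.10  file 'a' written (12 bytes)          -> mtime 1000
# t=1000.20  dirstate records ('n', mode, 12, 1000)
# t=1000.90  file 'a' rewritten, still 12 bytes   -> mtime still 1000
# a later status would see size and mtime unchanged and call 'a' clean,
# so entries stamped with the current second get their mtime dropped:
entry = ('n', 0o644, 12, 1000)
now = 1000
if entry[0] == 'n' and entry[3] == now:
    entry = (entry[0], entry[1], entry[2], -1)   # force a later content check
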
725 def addparentchangecallback(self, category, callback):
725 def addparentchangecallback(self, category, callback):
726 """add a callback to be called when the wd parents are changed
726 """add a callback to be called when the wd parents are changed
727
727
728 Callback will be called with the following arguments:
728 Callback will be called with the following arguments:
729 dirstate, (oldp1, oldp2), (newp1, newp2)
729 dirstate, (oldp1, oldp2), (newp1, newp2)
730
730
731 Category is a unique identifier to allow overwriting an old callback
731 Category is a unique identifier to allow overwriting an old callback
732 with a newer callback.
732 with a newer callback.
733 """
733 """
734 self._plchangecallbacks[category] = callback
734 self._plchangecallbacks[category] = callback
735
735
736 def _writedirstate(self, st):
736 def _writedirstate(self, st):
737 # notify callbacks about parents change
737 # notify callbacks about parents change
738 if self._origpl is not None and self._origpl != self._pl:
738 if self._origpl is not None and self._origpl != self._pl:
739 for c, callback in sorted(self._plchangecallbacks.iteritems()):
739 for c, callback in sorted(self._plchangecallbacks.iteritems()):
740 callback(self, self._origpl, self._pl)
740 callback(self, self._origpl, self._pl)
741 self._origpl = None
741 self._origpl = None
742 # use the modification time of the newly created temporary file as the
742 # use the modification time of the newly created temporary file as the
743 # filesystem's notion of 'now'
743 # filesystem's notion of 'now'
744 now = util.fstat(st).st_mtime & _rangemask
744 now = util.fstat(st).st_mtime & _rangemask
745
745
746 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
746 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
747 # the timestamp of each entry in the dirstate, because of 'now > mtime'
747 # the timestamp of each entry in the dirstate, because of 'now > mtime'
748 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
748 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
749 if delaywrite > 0:
749 if delaywrite > 0:
750 # do we have any files to delay for?
750 # do we have any files to delay for?
751 for f, e in self._map.iteritems():
751 for f, e in self._map.iteritems():
752 if e[0] == 'n' and e[3] == now:
752 if e[0] == 'n' and e[3] == now:
753 import time # to avoid useless import
753 import time # to avoid useless import
754 # rather than sleep n seconds, sleep until the next
754 # rather than sleep n seconds, sleep until the next
755 # multiple of n seconds
755 # multiple of n seconds
756 clock = time.time()
756 clock = time.time()
757 start = int(clock) - (int(clock) % delaywrite)
757 start = int(clock) - (int(clock) % delaywrite)
758 end = start + delaywrite
758 end = start + delaywrite
759 time.sleep(end - clock)
759 time.sleep(end - clock)
760 now = end # trust our estimate that the end is near now
760 break
761 break
761
762
762 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
763 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
763 self._nonnormalset = nonnormalentries(self._map)
764 self._nonnormalset = nonnormalentries(self._map)
764 st.close()
765 st.close()
765 self._lastnormaltime = 0
766 self._lastnormaltime = 0
766 self._dirty = self._dirtypl = False
767 self._dirty = self._dirtypl = False
767
768
768 def _dirignore(self, f):
769 def _dirignore(self, f):
769 if f == '.':
770 if f == '.':
770 return False
771 return False
771 if self._ignore(f):
772 if self._ignore(f):
772 return True
773 return True
773 for p in util.finddirs(f):
774 for p in util.finddirs(f):
774 if self._ignore(p):
775 if self._ignore(p):
775 return True
776 return True
776 return False
777 return False
777
778
778 def _ignorefiles(self):
779 def _ignorefiles(self):
779 files = []
780 files = []
780 if os.path.exists(self._join('.hgignore')):
781 if os.path.exists(self._join('.hgignore')):
781 files.append(self._join('.hgignore'))
782 files.append(self._join('.hgignore'))
782 for name, path in self._ui.configitems("ui"):
783 for name, path in self._ui.configitems("ui"):
783 if name == 'ignore' or name.startswith('ignore.'):
784 if name == 'ignore' or name.startswith('ignore.'):
784 # we need to use os.path.join here rather than self._join
785 # we need to use os.path.join here rather than self._join
785 # because path is arbitrary and user-specified
786 # because path is arbitrary and user-specified
786 files.append(os.path.join(self._rootdir, util.expandpath(path)))
787 files.append(os.path.join(self._rootdir, util.expandpath(path)))
787 return files
788 return files
788
789
789 def _ignorefileandline(self, f):
790 def _ignorefileandline(self, f):
790 files = collections.deque(self._ignorefiles())
791 files = collections.deque(self._ignorefiles())
791 visited = set()
792 visited = set()
792 while files:
793 while files:
793 i = files.popleft()
794 i = files.popleft()
794 patterns = matchmod.readpatternfile(i, self._ui.warn,
795 patterns = matchmod.readpatternfile(i, self._ui.warn,
795 sourceinfo=True)
796 sourceinfo=True)
796 for pattern, lineno, line in patterns:
797 for pattern, lineno, line in patterns:
797 kind, p = matchmod._patsplit(pattern, 'glob')
798 kind, p = matchmod._patsplit(pattern, 'glob')
798 if kind == "subinclude":
799 if kind == "subinclude":
799 if p not in visited:
800 if p not in visited:
800 files.append(p)
801 files.append(p)
801 continue
802 continue
802 m = matchmod.match(self._root, '', [], [pattern],
803 m = matchmod.match(self._root, '', [], [pattern],
803 warn=self._ui.warn)
804 warn=self._ui.warn)
804 if m(f):
805 if m(f):
805 return (i, lineno, line)
806 return (i, lineno, line)
806 visited.add(i)
807 visited.add(i)
807 return (None, -1, "")
808 return (None, -1, "")
808
809
809 def _walkexplicit(self, match, subrepos):
810 def _walkexplicit(self, match, subrepos):
810 '''Get stat data about the files explicitly specified by match.
811 '''Get stat data about the files explicitly specified by match.
811
812
812 Return a triple (results, dirsfound, dirsnotfound).
813 Return a triple (results, dirsfound, dirsnotfound).
813 - results is a mapping from filename to stat result. It also contains
814 - results is a mapping from filename to stat result. It also contains
814 listings mapping subrepos and .hg to None.
815 listings mapping subrepos and .hg to None.
815 - dirsfound is a list of files found to be directories.
816 - dirsfound is a list of files found to be directories.
816 - dirsnotfound is a list of files that the dirstate thinks are
817 - dirsnotfound is a list of files that the dirstate thinks are
817 directories and that were not found.'''
818 directories and that were not found.'''
818
819
819 def badtype(mode):
820 def badtype(mode):
820 kind = _('unknown')
821 kind = _('unknown')
821 if stat.S_ISCHR(mode):
822 if stat.S_ISCHR(mode):
822 kind = _('character device')
823 kind = _('character device')
823 elif stat.S_ISBLK(mode):
824 elif stat.S_ISBLK(mode):
824 kind = _('block device')
825 kind = _('block device')
825 elif stat.S_ISFIFO(mode):
826 elif stat.S_ISFIFO(mode):
826 kind = _('fifo')
827 kind = _('fifo')
827 elif stat.S_ISSOCK(mode):
828 elif stat.S_ISSOCK(mode):
828 kind = _('socket')
829 kind = _('socket')
829 elif stat.S_ISDIR(mode):
830 elif stat.S_ISDIR(mode):
830 kind = _('directory')
831 kind = _('directory')
831 return _('unsupported file type (type is %s)') % kind
832 return _('unsupported file type (type is %s)') % kind
832
833
833 matchedir = match.explicitdir
834 matchedir = match.explicitdir
834 badfn = match.bad
835 badfn = match.bad
835 dmap = self._map
836 dmap = self._map
836 lstat = os.lstat
837 lstat = os.lstat
837 getkind = stat.S_IFMT
838 getkind = stat.S_IFMT
838 dirkind = stat.S_IFDIR
839 dirkind = stat.S_IFDIR
839 regkind = stat.S_IFREG
840 regkind = stat.S_IFREG
840 lnkkind = stat.S_IFLNK
841 lnkkind = stat.S_IFLNK
841 join = self._join
842 join = self._join
842 dirsfound = []
843 dirsfound = []
843 foundadd = dirsfound.append
844 foundadd = dirsfound.append
844 dirsnotfound = []
845 dirsnotfound = []
845 notfoundadd = dirsnotfound.append
846 notfoundadd = dirsnotfound.append
846
847
847 if not match.isexact() and self._checkcase:
848 if not match.isexact() and self._checkcase:
848 normalize = self._normalize
849 normalize = self._normalize
849 else:
850 else:
850 normalize = None
851 normalize = None
851
852
852 files = sorted(match.files())
853 files = sorted(match.files())
853 subrepos.sort()
854 subrepos.sort()
854 i, j = 0, 0
855 i, j = 0, 0
855 while i < len(files) and j < len(subrepos):
856 while i < len(files) and j < len(subrepos):
856 subpath = subrepos[j] + "/"
857 subpath = subrepos[j] + "/"
857 if files[i] < subpath:
858 if files[i] < subpath:
858 i += 1
859 i += 1
859 continue
860 continue
860 while i < len(files) and files[i].startswith(subpath):
861 while i < len(files) and files[i].startswith(subpath):
861 del files[i]
862 del files[i]
862 j += 1
863 j += 1
863
864
864 if not files or '.' in files:
865 if not files or '.' in files:
865 files = ['.']
866 files = ['.']
866 results = dict.fromkeys(subrepos)
867 results = dict.fromkeys(subrepos)
867 results['.hg'] = None
868 results['.hg'] = None
868
869
869 alldirs = None
870 alldirs = None
870 for ff in files:
871 for ff in files:
871 # constructing the foldmap is expensive, so don't do it for the
872 # constructing the foldmap is expensive, so don't do it for the
872 # common case where files is ['.']
873 # common case where files is ['.']
873 if normalize and ff != '.':
874 if normalize and ff != '.':
874 nf = normalize(ff, False, True)
875 nf = normalize(ff, False, True)
875 else:
876 else:
876 nf = ff
877 nf = ff
877 if nf in results:
878 if nf in results:
878 continue
879 continue
879
880
880 try:
881 try:
881 st = lstat(join(nf))
882 st = lstat(join(nf))
882 kind = getkind(st.st_mode)
883 kind = getkind(st.st_mode)
883 if kind == dirkind:
884 if kind == dirkind:
884 if nf in dmap:
885 if nf in dmap:
885 # file replaced by dir on disk but still in dirstate
886 # file replaced by dir on disk but still in dirstate
886 results[nf] = None
887 results[nf] = None
887 if matchedir:
888 if matchedir:
888 matchedir(nf)
889 matchedir(nf)
889 foundadd((nf, ff))
890 foundadd((nf, ff))
890 elif kind == regkind or kind == lnkkind:
891 elif kind == regkind or kind == lnkkind:
891 results[nf] = st
892 results[nf] = st
892 else:
893 else:
893 badfn(ff, badtype(kind))
894 badfn(ff, badtype(kind))
894 if nf in dmap:
895 if nf in dmap:
895 results[nf] = None
896 results[nf] = None
896 except OSError as inst: # nf not found on disk - it is dirstate only
897 except OSError as inst: # nf not found on disk - it is dirstate only
897 if nf in dmap: # does it exactly match a missing file?
898 if nf in dmap: # does it exactly match a missing file?
898 results[nf] = None
899 results[nf] = None
899 else: # does it match a missing directory?
900 else: # does it match a missing directory?
900 if alldirs is None:
901 if alldirs is None:
901 alldirs = util.dirs(dmap)
902 alldirs = util.dirs(dmap)
902 if nf in alldirs:
903 if nf in alldirs:
903 if matchedir:
904 if matchedir:
904 matchedir(nf)
905 matchedir(nf)
905 notfoundadd(nf)
906 notfoundadd(nf)
906 else:
907 else:
907 badfn(ff, inst.strerror)
908 badfn(ff, inst.strerror)
908
909
909 # Case insensitive filesystems cannot rely on lstat() failing to detect
910 # Case insensitive filesystems cannot rely on lstat() failing to detect
910 # a case-only rename. Prune the stat object for any file that does not
911 # a case-only rename. Prune the stat object for any file that does not
911 # match the case in the filesystem, if there are multiple files that
912 # match the case in the filesystem, if there are multiple files that
912 # normalize to the same path.
913 # normalize to the same path.
913 if match.isexact() and self._checkcase:
914 if match.isexact() and self._checkcase:
914 normed = {}
915 normed = {}
915
916
916 for f, st in results.iteritems():
917 for f, st in results.iteritems():
917 if st is None:
918 if st is None:
918 continue
919 continue
919
920
920 nc = util.normcase(f)
921 nc = util.normcase(f)
921 paths = normed.get(nc)
922 paths = normed.get(nc)
922
923
923 if paths is None:
924 if paths is None:
924 paths = set()
925 paths = set()
925 normed[nc] = paths
926 normed[nc] = paths
926
927
927 paths.add(f)
928 paths.add(f)
928
929
929 for norm, paths in normed.iteritems():
930 for norm, paths in normed.iteritems():
930 if len(paths) > 1:
931 if len(paths) > 1:
931 for path in paths:
932 for path in paths:
932 folded = self._discoverpath(path, norm, True, None,
933 folded = self._discoverpath(path, norm, True, None,
933 self._dirfoldmap)
934 self._dirfoldmap)
934 if path != folded:
935 if path != folded:
935 results[path] = None
936 results[path] = None
936
937
937 return results, dirsfound, dirsnotfound
938 return results, dirsfound, dirsnotfound
938
939
939 def walk(self, match, subrepos, unknown, ignored, full=True):
940 def walk(self, match, subrepos, unknown, ignored, full=True):
940 '''
941 '''
941 Walk recursively through the directory tree, finding all files
942 Walk recursively through the directory tree, finding all files
942 matched by match.
943 matched by match.
943
944
944 If full is False, maybe skip some known-clean files.
945 If full is False, maybe skip some known-clean files.
945
946
946 Return a dict mapping filename to stat-like object (either
947 Return a dict mapping filename to stat-like object (either
947 mercurial.osutil.stat instance or return value of os.stat()).
948 mercurial.osutil.stat instance or return value of os.stat()).
948
949
949 '''
950 '''
950 # full is a flag that extensions that hook into walk can use -- this
951 # full is a flag that extensions that hook into walk can use -- this
951 # implementation doesn't use it at all. This satisfies the contract
952 # implementation doesn't use it at all. This satisfies the contract
952 # because we only guarantee a "maybe".
953 # because we only guarantee a "maybe".
953
954
954 if ignored:
955 if ignored:
955 ignore = util.never
956 ignore = util.never
956 dirignore = util.never
957 dirignore = util.never
957 elif unknown:
958 elif unknown:
958 ignore = self._ignore
959 ignore = self._ignore
959 dirignore = self._dirignore
960 dirignore = self._dirignore
960 else:
961 else:
961 # if not unknown and not ignored, drop dir recursion and step 2
962 # if not unknown and not ignored, drop dir recursion and step 2
962 ignore = util.always
963 ignore = util.always
963 dirignore = util.always
964 dirignore = util.always
964
965
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = osutil.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                nd = work.pop()
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd), inst.strerror)
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

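        # traverse() above is an iterative depth-first walk driven by an
        # explicit work stack; stripped of matching and normalization it boils
        # down to something like this sketch (standalone names assumed):
        #
        #   >>> work = ['']                     # '' stands for the repo root
        #   >>> while work:
        #   ...     nd = work.pop()
        #   ...     for f, kind, st in listdir(join(nd), stat=True):
        #   ...         if kind == dirkind:
        #   ...             work.append(nd and nd + '/' + f or f)
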
        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn, b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = dmap.keys()
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory,
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                nf = iter(visit).next
                for st in util.statfiles([join(i) for i in visit]):
                    results[nf()] = st
        return results

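    # The dict returned by walk() maps each reported name either to a
    # stat-like object (the file was found and stat'ed) or to None (tracked
    # but missing, or deliberately pruned); a hypothetical sketch of consuming
    # it ('repo' and matcher 'm' are assumed to exist):
    #
    #   >>> results = repo.dirstate.walk(m, [], True, False)
    #   >>> missing = [f for f, st in results.iteritems() if st is None]
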
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
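        # Hypothetical usage sketch (names assumed, not part of this module):
        #
        #   >>> unsure, s = repo.dirstate.status(m, [], ignored=False,
        #   ...                                  clean=False, unknown=True)
        #   >>> len(unsure), s.modified, s.unknown
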
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        ladd = lookup.append # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

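            # A rough way to see the effect described above (hypothetical
            # sketch; array.array stands in for the non-tuple dirstatetuple,
            # since unpacking it also skips the tuple/list fast path):
            #
            #   >>> import timeit
            #   >>> setup = "import array; t = array.array('i', [0, 1, 2, 3])"
            #   >>> timeit.timeit('a = t[0]; b = t[1]; c = t[2]; d = t[3]', setup)
            #   >>> timeit.timeit('a, b, c, d = t', setup)
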
            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

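        # Note on the size/mtime comparisons above: the dirstate stores both
        # values masked to 31 bits (_rangemask = 0x7fffffff), so each field is
        # compared both raw and masked; e.g. (hypothetical value):
        #
        #   >>> 0x123456789 & _rangemask == 0x23456789    # high bits dropped
        #   True
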
        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))

    def matches(self, match):
        '''
        return files in the dirstate (in whatever state) filtered by match
        '''
        dmap = self._map
        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files is
            # much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just return
            # that
            return list(files)
        return [f for f in dmap if match(f)]

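    # Hypothetical usage sketch (names and pattern are assumptions, not part
    # of this module):
    #
    #   >>> m = matchmod.match(repo.root, repo.root, ['re:.*\.py$'])
    #   >>> tracked_py = repo.dirstate.matches(m)
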
    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, suffix='', prefix=''):
        '''Save current dirstate into backup file with suffix'''
        assert len(suffix) > 0 or len(prefix) > 0
        filename = self._actualfilename(tr)

        # use '_writedirstate' instead of 'write' to make sure changes are
        # actually written out, because the latter skips writing while a
        # transaction is running. The output file is then used to create the
        # backup of the dirstate at this point.
        self._writedirstate(self._opener(filename, "w", atomictemp=True,
                                         checkambig=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that the pending file written above is unlinked on
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        self._opener.write(prefix + self._filename + suffix,
                           self._opener.tryread(filename))

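    # The backup written above sits next to the regular dirstate file in .hg/
    # and is named prefix + 'dirstate' + suffix; e.g. with the hypothetical
    # arguments below it would be '.hg/dirstate.backup', later consumed by
    # restorebackup() or removed by clearbackup():
    #
    #   >>> prefix, suffix = '', '.backup'
    #   >>> prefix + 'dirstate' + suffix
    #   'dirstate.backup'
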
    def restorebackup(self, tr, suffix='', prefix=''):
        '''Restore dirstate from backup file with suffix'''
        assert len(suffix) > 0 or len(prefix) > 0
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        # using self._filename to avoid having "pending" in the backup filename
        self._opener.rename(prefix + self._filename + suffix, filename,
                            checkambig=True)

    def clearbackup(self, tr, suffix='', prefix=''):
        '''Clear backup file with suffix'''
        assert len(suffix) > 0 or len(prefix) > 0
        # using self._filename to avoid having "pending" in the backup filename
        self._opener.unlink(prefix + self._filename + suffix)
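
    # Hypothetical sketch of how a caller might pair these backup helpers
    # (names and the suffix value are assumptions, not part of this module):
    #
    #   >>> repo.dirstate.savebackup(tr, suffix='.example')
    #   >>> try:
    #   ...     update_working_directory()      # some risky operation
    #   ... except Exception:
    #   ...     repo.dirstate.restorebackup(tr, suffix='.example')
    #   ...     raise
    #   ... else:
    #   ...     repo.dirstate.clearbackup(tr, suffix='.example')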