##// END OF EJS Templates
dirstate: simplify dirstate's __iter__...
Alex Gaynor -
r33673:36d216dc default
parent child Browse files
Show More
@@ -1,1343 +1,1342 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18 from . import (
18 from . import (
19 encoding,
19 encoding,
20 error,
20 error,
21 match as matchmod,
21 match as matchmod,
22 pathutil,
22 pathutil,
23 policy,
23 policy,
24 pycompat,
24 pycompat,
25 scmutil,
25 scmutil,
26 txnutil,
26 txnutil,
27 util,
27 util,
28 )
28 )
29
29
30 parsers = policy.importmod(r'parsers')
30 parsers = policy.importmod(r'parsers')
31
31
32 propertycache = util.propertycache
32 propertycache = util.propertycache
33 filecache = scmutil.filecache
33 filecache = scmutil.filecache
34 _rangemask = 0x7fffffff
34 _rangemask = 0x7fffffff
35
35
36 dirstatetuple = parsers.dirstatetuple
36 dirstatetuple = parsers.dirstatetuple
37
37
38 class repocache(filecache):
38 class repocache(filecache):
39 """filecache for files in .hg/"""
39 """filecache for files in .hg/"""
40 def join(self, obj, fname):
40 def join(self, obj, fname):
41 return obj._opener.join(fname)
41 return obj._opener.join(fname)
42
42
43 class rootcache(filecache):
43 class rootcache(filecache):
44 """filecache for files in the repository root"""
44 """filecache for files in the repository root"""
45 def join(self, obj, fname):
45 def join(self, obj, fname):
46 return obj._join(fname)
46 return obj._join(fname)
47
47
48 def _getfsnow(vfs):
48 def _getfsnow(vfs):
49 '''Get "now" timestamp on filesystem'''
49 '''Get "now" timestamp on filesystem'''
50 tmpfd, tmpname = vfs.mkstemp()
50 tmpfd, tmpname = vfs.mkstemp()
51 try:
51 try:
52 return os.fstat(tmpfd).st_mtime
52 return os.fstat(tmpfd).st_mtime
53 finally:
53 finally:
54 os.close(tmpfd)
54 os.close(tmpfd)
55 vfs.unlink(tmpname)
55 vfs.unlink(tmpname)
56
56
57 def nonnormalentries(dmap):
57 def nonnormalentries(dmap):
58 '''Compute the nonnormal dirstate entries from the dmap'''
58 '''Compute the nonnormal dirstate entries from the dmap'''
59 try:
59 try:
60 return parsers.nonnormalotherparententries(dmap)
60 return parsers.nonnormalotherparententries(dmap)
61 except AttributeError:
61 except AttributeError:
62 nonnorm = set()
62 nonnorm = set()
63 otherparent = set()
63 otherparent = set()
64 for fname, e in dmap.iteritems():
64 for fname, e in dmap.iteritems():
65 if e[0] != 'n' or e[3] == -1:
65 if e[0] != 'n' or e[3] == -1:
66 nonnorm.add(fname)
66 nonnorm.add(fname)
67 if e[0] == 'n' and e[2] == -2:
67 if e[0] == 'n' and e[2] == -2:
68 otherparent.add(fname)
68 otherparent.add(fname)
69 return nonnorm, otherparent
69 return nonnorm, otherparent
70
70
71 class dirstate(object):
71 class dirstate(object):
72
72
73 def __init__(self, opener, ui, root, validate, sparsematchfn):
73 def __init__(self, opener, ui, root, validate, sparsematchfn):
74 '''Create a new dirstate object.
74 '''Create a new dirstate object.
75
75
76 opener is an open()-like callable that can be used to open the
76 opener is an open()-like callable that can be used to open the
77 dirstate file; root is the root of the directory tracked by
77 dirstate file; root is the root of the directory tracked by
78 the dirstate.
78 the dirstate.
79 '''
79 '''
80 self._opener = opener
80 self._opener = opener
81 self._validate = validate
81 self._validate = validate
82 self._root = root
82 self._root = root
83 self._sparsematchfn = sparsematchfn
83 self._sparsematchfn = sparsematchfn
84 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
84 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
85 # UNC path pointing to root share (issue4557)
85 # UNC path pointing to root share (issue4557)
86 self._rootdir = pathutil.normasprefix(root)
86 self._rootdir = pathutil.normasprefix(root)
87 self._dirty = False
87 self._dirty = False
88 self._dirtypl = False
88 self._dirtypl = False
89 self._lastnormaltime = 0
89 self._lastnormaltime = 0
90 self._ui = ui
90 self._ui = ui
91 self._filecache = {}
91 self._filecache = {}
92 self._parentwriters = 0
92 self._parentwriters = 0
93 self._filename = 'dirstate'
93 self._filename = 'dirstate'
94 self._pendingfilename = '%s.pending' % self._filename
94 self._pendingfilename = '%s.pending' % self._filename
95 self._plchangecallbacks = {}
95 self._plchangecallbacks = {}
96 self._origpl = None
96 self._origpl = None
97 self._updatedfiles = set()
97 self._updatedfiles = set()
98
98
99 # for consistent view between _pl() and _read() invocations
99 # for consistent view between _pl() and _read() invocations
100 self._pendingmode = None
100 self._pendingmode = None
101
101
102 @contextlib.contextmanager
102 @contextlib.contextmanager
103 def parentchange(self):
103 def parentchange(self):
104 '''Context manager for handling dirstate parents.
104 '''Context manager for handling dirstate parents.
105
105
106 If an exception occurs in the scope of the context manager,
106 If an exception occurs in the scope of the context manager,
107 the incoherent dirstate won't be written when wlock is
107 the incoherent dirstate won't be written when wlock is
108 released.
108 released.
109 '''
109 '''
110 self._parentwriters += 1
110 self._parentwriters += 1
111 yield
111 yield
112 # Typically we want the "undo" step of a context manager in a
112 # Typically we want the "undo" step of a context manager in a
113 # finally block so it happens even when an exception
113 # finally block so it happens even when an exception
114 # occurs. In this case, however, we only want to decrement
114 # occurs. In this case, however, we only want to decrement
115 # parentwriters if the code in the with statement exits
115 # parentwriters if the code in the with statement exits
116 # normally, so we don't have a try/finally here on purpose.
116 # normally, so we don't have a try/finally here on purpose.
117 self._parentwriters -= 1
117 self._parentwriters -= 1
118
118
119 def beginparentchange(self):
119 def beginparentchange(self):
120 '''Marks the beginning of a set of changes that involve changing
120 '''Marks the beginning of a set of changes that involve changing
121 the dirstate parents. If there is an exception during this time,
121 the dirstate parents. If there is an exception during this time,
122 the dirstate will not be written when the wlock is released. This
122 the dirstate will not be written when the wlock is released. This
123 prevents writing an incoherent dirstate where the parent doesn't
123 prevents writing an incoherent dirstate where the parent doesn't
124 match the contents.
124 match the contents.
125 '''
125 '''
126 self._ui.deprecwarn('beginparentchange is obsoleted by the '
126 self._ui.deprecwarn('beginparentchange is obsoleted by the '
127 'parentchange context manager.', '4.3')
127 'parentchange context manager.', '4.3')
128 self._parentwriters += 1
128 self._parentwriters += 1
129
129
130 def endparentchange(self):
130 def endparentchange(self):
131 '''Marks the end of a set of changes that involve changing the
131 '''Marks the end of a set of changes that involve changing the
132 dirstate parents. Once all parent changes have been marked done,
132 dirstate parents. Once all parent changes have been marked done,
133 the wlock will be free to write the dirstate on release.
133 the wlock will be free to write the dirstate on release.
134 '''
134 '''
135 self._ui.deprecwarn('endparentchange is obsoleted by the '
135 self._ui.deprecwarn('endparentchange is obsoleted by the '
136 'parentchange context manager.', '4.3')
136 'parentchange context manager.', '4.3')
137 if self._parentwriters > 0:
137 if self._parentwriters > 0:
138 self._parentwriters -= 1
138 self._parentwriters -= 1
139
139
140 def pendingparentchange(self):
140 def pendingparentchange(self):
141 '''Returns true if the dirstate is in the middle of a set of changes
141 '''Returns true if the dirstate is in the middle of a set of changes
142 that modify the dirstate parent.
142 that modify the dirstate parent.
143 '''
143 '''
144 return self._parentwriters > 0
144 return self._parentwriters > 0
145
145
146 @propertycache
146 @propertycache
147 def _map(self):
147 def _map(self):
148 '''Return the dirstate contents as a map from filename to
148 '''Return the dirstate contents as a map from filename to
149 (state, mode, size, time).'''
149 (state, mode, size, time).'''
150 self._read()
150 self._read()
151 return self._map
151 return self._map
152
152
153 @propertycache
153 @propertycache
154 def _copymap(self):
154 def _copymap(self):
155 self._read()
155 self._read()
156 return self._copymap
156 return self._copymap
157
157
158 @propertycache
158 @propertycache
159 def _identity(self):
159 def _identity(self):
160 self._read()
160 self._read()
161 return self._identity
161 return self._identity
162
162
163 @propertycache
163 @propertycache
164 def _nonnormalset(self):
164 def _nonnormalset(self):
165 nonnorm, otherparents = nonnormalentries(self._map)
165 nonnorm, otherparents = nonnormalentries(self._map)
166 self._otherparentset = otherparents
166 self._otherparentset = otherparents
167 return nonnorm
167 return nonnorm
168
168
169 @propertycache
169 @propertycache
170 def _otherparentset(self):
170 def _otherparentset(self):
171 nonnorm, otherparents = nonnormalentries(self._map)
171 nonnorm, otherparents = nonnormalentries(self._map)
172 self._nonnormalset = nonnorm
172 self._nonnormalset = nonnorm
173 return otherparents
173 return otherparents
174
174
175 @propertycache
175 @propertycache
176 def _filefoldmap(self):
176 def _filefoldmap(self):
177 try:
177 try:
178 makefilefoldmap = parsers.make_file_foldmap
178 makefilefoldmap = parsers.make_file_foldmap
179 except AttributeError:
179 except AttributeError:
180 pass
180 pass
181 else:
181 else:
182 return makefilefoldmap(self._map, util.normcasespec,
182 return makefilefoldmap(self._map, util.normcasespec,
183 util.normcasefallback)
183 util.normcasefallback)
184
184
185 f = {}
185 f = {}
186 normcase = util.normcase
186 normcase = util.normcase
187 for name, s in self._map.iteritems():
187 for name, s in self._map.iteritems():
188 if s[0] != 'r':
188 if s[0] != 'r':
189 f[normcase(name)] = name
189 f[normcase(name)] = name
190 f['.'] = '.' # prevents useless util.fspath() invocation
190 f['.'] = '.' # prevents useless util.fspath() invocation
191 return f
191 return f
192
192
193 @propertycache
193 @propertycache
194 def _dirfoldmap(self):
194 def _dirfoldmap(self):
195 f = {}
195 f = {}
196 normcase = util.normcase
196 normcase = util.normcase
197 for name in self._dirs:
197 for name in self._dirs:
198 f[normcase(name)] = name
198 f[normcase(name)] = name
199 return f
199 return f
200
200
201 @property
201 @property
202 def _sparsematcher(self):
202 def _sparsematcher(self):
203 """The matcher for the sparse checkout.
203 """The matcher for the sparse checkout.
204
204
205 The working directory may not include every file from a manifest. The
205 The working directory may not include every file from a manifest. The
206 matcher obtained by this property will match a path if it is to be
206 matcher obtained by this property will match a path if it is to be
207 included in the working directory.
207 included in the working directory.
208 """
208 """
209 # TODO there is potential to cache this property. For now, the matcher
209 # TODO there is potential to cache this property. For now, the matcher
210 # is resolved on every access. (But the called function does use a
210 # is resolved on every access. (But the called function does use a
211 # cache to keep the lookup fast.)
211 # cache to keep the lookup fast.)
212 return self._sparsematchfn()
212 return self._sparsematchfn()
213
213
214 @repocache('branch')
214 @repocache('branch')
215 def _branch(self):
215 def _branch(self):
216 try:
216 try:
217 return self._opener.read("branch").strip() or "default"
217 return self._opener.read("branch").strip() or "default"
218 except IOError as inst:
218 except IOError as inst:
219 if inst.errno != errno.ENOENT:
219 if inst.errno != errno.ENOENT:
220 raise
220 raise
221 return "default"
221 return "default"
222
222
223 @propertycache
223 @propertycache
224 def _pl(self):
224 def _pl(self):
225 try:
225 try:
226 fp = self._opendirstatefile()
226 fp = self._opendirstatefile()
227 st = fp.read(40)
227 st = fp.read(40)
228 fp.close()
228 fp.close()
229 l = len(st)
229 l = len(st)
230 if l == 40:
230 if l == 40:
231 return st[:20], st[20:40]
231 return st[:20], st[20:40]
232 elif l > 0 and l < 40:
232 elif l > 0 and l < 40:
233 raise error.Abort(_('working directory state appears damaged!'))
233 raise error.Abort(_('working directory state appears damaged!'))
234 except IOError as err:
234 except IOError as err:
235 if err.errno != errno.ENOENT:
235 if err.errno != errno.ENOENT:
236 raise
236 raise
237 return [nullid, nullid]
237 return [nullid, nullid]
238
238
239 @propertycache
239 @propertycache
240 def _dirs(self):
240 def _dirs(self):
241 return util.dirs(self._map, 'r')
241 return util.dirs(self._map, 'r')
242
242
243 def dirs(self):
243 def dirs(self):
244 return self._dirs
244 return self._dirs
245
245
246 @rootcache('.hgignore')
246 @rootcache('.hgignore')
247 def _ignore(self):
247 def _ignore(self):
248 files = self._ignorefiles()
248 files = self._ignorefiles()
249 if not files:
249 if not files:
250 return matchmod.never(self._root, '')
250 return matchmod.never(self._root, '')
251
251
252 pats = ['include:%s' % f for f in files]
252 pats = ['include:%s' % f for f in files]
253 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
253 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
254
254
255 @propertycache
255 @propertycache
256 def _slash(self):
256 def _slash(self):
257 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
257 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
258
258
259 @propertycache
259 @propertycache
260 def _checklink(self):
260 def _checklink(self):
261 return util.checklink(self._root)
261 return util.checklink(self._root)
262
262
263 @propertycache
263 @propertycache
264 def _checkexec(self):
264 def _checkexec(self):
265 return util.checkexec(self._root)
265 return util.checkexec(self._root)
266
266
267 @propertycache
267 @propertycache
268 def _checkcase(self):
268 def _checkcase(self):
269 return not util.fscasesensitive(self._join('.hg'))
269 return not util.fscasesensitive(self._join('.hg'))
270
270
271 def _join(self, f):
271 def _join(self, f):
272 # much faster than os.path.join()
272 # much faster than os.path.join()
273 # it's safe because f is always a relative path
273 # it's safe because f is always a relative path
274 return self._rootdir + f
274 return self._rootdir + f
275
275
276 def flagfunc(self, buildfallback):
276 def flagfunc(self, buildfallback):
277 if self._checklink and self._checkexec:
277 if self._checklink and self._checkexec:
278 def f(x):
278 def f(x):
279 try:
279 try:
280 st = os.lstat(self._join(x))
280 st = os.lstat(self._join(x))
281 if util.statislink(st):
281 if util.statislink(st):
282 return 'l'
282 return 'l'
283 if util.statisexec(st):
283 if util.statisexec(st):
284 return 'x'
284 return 'x'
285 except OSError:
285 except OSError:
286 pass
286 pass
287 return ''
287 return ''
288 return f
288 return f
289
289
290 fallback = buildfallback()
290 fallback = buildfallback()
291 if self._checklink:
291 if self._checklink:
292 def f(x):
292 def f(x):
293 if os.path.islink(self._join(x)):
293 if os.path.islink(self._join(x)):
294 return 'l'
294 return 'l'
295 if 'x' in fallback(x):
295 if 'x' in fallback(x):
296 return 'x'
296 return 'x'
297 return ''
297 return ''
298 return f
298 return f
299 if self._checkexec:
299 if self._checkexec:
300 def f(x):
300 def f(x):
301 if 'l' in fallback(x):
301 if 'l' in fallback(x):
302 return 'l'
302 return 'l'
303 if util.isexec(self._join(x)):
303 if util.isexec(self._join(x)):
304 return 'x'
304 return 'x'
305 return ''
305 return ''
306 return f
306 return f
307 else:
307 else:
308 return fallback
308 return fallback
309
309
310 @propertycache
310 @propertycache
311 def _cwd(self):
311 def _cwd(self):
312 # internal config: ui.forcecwd
312 # internal config: ui.forcecwd
313 forcecwd = self._ui.config('ui', 'forcecwd')
313 forcecwd = self._ui.config('ui', 'forcecwd')
314 if forcecwd:
314 if forcecwd:
315 return forcecwd
315 return forcecwd
316 return pycompat.getcwd()
316 return pycompat.getcwd()
317
317
318 def getcwd(self):
318 def getcwd(self):
319 '''Return the path from which a canonical path is calculated.
319 '''Return the path from which a canonical path is calculated.
320
320
321 This path should be used to resolve file patterns or to convert
321 This path should be used to resolve file patterns or to convert
322 canonical paths back to file paths for display. It shouldn't be
322 canonical paths back to file paths for display. It shouldn't be
323 used to get real file paths. Use vfs functions instead.
323 used to get real file paths. Use vfs functions instead.
324 '''
324 '''
325 cwd = self._cwd
325 cwd = self._cwd
326 if cwd == self._root:
326 if cwd == self._root:
327 return ''
327 return ''
328 # self._root ends with a path separator if self._root is '/' or 'C:\'
328 # self._root ends with a path separator if self._root is '/' or 'C:\'
329 rootsep = self._root
329 rootsep = self._root
330 if not util.endswithsep(rootsep):
330 if not util.endswithsep(rootsep):
331 rootsep += pycompat.ossep
331 rootsep += pycompat.ossep
332 if cwd.startswith(rootsep):
332 if cwd.startswith(rootsep):
333 return cwd[len(rootsep):]
333 return cwd[len(rootsep):]
334 else:
334 else:
335 # we're outside the repo. return an absolute path.
335 # we're outside the repo. return an absolute path.
336 return cwd
336 return cwd
337
337
338 def pathto(self, f, cwd=None):
338 def pathto(self, f, cwd=None):
339 if cwd is None:
339 if cwd is None:
340 cwd = self.getcwd()
340 cwd = self.getcwd()
341 path = util.pathto(self._root, cwd, f)
341 path = util.pathto(self._root, cwd, f)
342 if self._slash:
342 if self._slash:
343 return util.pconvert(path)
343 return util.pconvert(path)
344 return path
344 return path
345
345
346 def __getitem__(self, key):
346 def __getitem__(self, key):
347 '''Return the current state of key (a filename) in the dirstate.
347 '''Return the current state of key (a filename) in the dirstate.
348
348
349 States are:
349 States are:
350 n normal
350 n normal
351 m needs merging
351 m needs merging
352 r marked for removal
352 r marked for removal
353 a marked for addition
353 a marked for addition
354 ? not tracked
354 ? not tracked
355 '''
355 '''
356 return self._map.get(key, ("?",))[0]
356 return self._map.get(key, ("?",))[0]
357
357
358 def __contains__(self, key):
358 def __contains__(self, key):
359 return key in self._map
359 return key in self._map
360
360
361 def __iter__(self):
361 def __iter__(self):
362 for x in sorted(self._map):
362 return iter(sorted(self._map))
363 yield x
364
363
365 def items(self):
364 def items(self):
366 return self._map.iteritems()
365 return self._map.iteritems()
367
366
368 iteritems = items
367 iteritems = items
369
368
370 def parents(self):
369 def parents(self):
371 return [self._validate(p) for p in self._pl]
370 return [self._validate(p) for p in self._pl]
372
371
373 def p1(self):
372 def p1(self):
374 return self._validate(self._pl[0])
373 return self._validate(self._pl[0])
375
374
376 def p2(self):
375 def p2(self):
377 return self._validate(self._pl[1])
376 return self._validate(self._pl[1])
378
377
379 def branch(self):
378 def branch(self):
380 return encoding.tolocal(self._branch)
379 return encoding.tolocal(self._branch)
381
380
382 def setparents(self, p1, p2=nullid):
381 def setparents(self, p1, p2=nullid):
383 """Set dirstate parents to p1 and p2.
382 """Set dirstate parents to p1 and p2.
384
383
 385 When moving from two parents to one, 'm' merged entries are
 384 When moving from two parents to one, 'm' merged entries are
 386 adjusted to normal and previous copy records discarded and
 385 adjusted to normal and previous copy records discarded and
387 returned by the call.
386 returned by the call.
388
387
389 See localrepo.setparents()
388 See localrepo.setparents()
390 """
389 """
391 if self._parentwriters == 0:
390 if self._parentwriters == 0:
392 raise ValueError("cannot set dirstate parent without "
391 raise ValueError("cannot set dirstate parent without "
393 "calling dirstate.beginparentchange")
392 "calling dirstate.beginparentchange")
394
393
395 self._dirty = self._dirtypl = True
394 self._dirty = self._dirtypl = True
396 oldp2 = self._pl[1]
395 oldp2 = self._pl[1]
397 if self._origpl is None:
396 if self._origpl is None:
398 self._origpl = self._pl
397 self._origpl = self._pl
399 self._pl = p1, p2
398 self._pl = p1, p2
400 copies = {}
399 copies = {}
401 if oldp2 != nullid and p2 == nullid:
400 if oldp2 != nullid and p2 == nullid:
402 candidatefiles = self._nonnormalset.union(self._otherparentset)
401 candidatefiles = self._nonnormalset.union(self._otherparentset)
403 for f in candidatefiles:
402 for f in candidatefiles:
404 s = self._map.get(f)
403 s = self._map.get(f)
405 if s is None:
404 if s is None:
406 continue
405 continue
407
406
408 # Discard 'm' markers when moving away from a merge state
407 # Discard 'm' markers when moving away from a merge state
409 if s[0] == 'm':
408 if s[0] == 'm':
410 if f in self._copymap:
409 if f in self._copymap:
411 copies[f] = self._copymap[f]
410 copies[f] = self._copymap[f]
412 self.normallookup(f)
411 self.normallookup(f)
413 # Also fix up otherparent markers
412 # Also fix up otherparent markers
414 elif s[0] == 'n' and s[2] == -2:
413 elif s[0] == 'n' and s[2] == -2:
415 if f in self._copymap:
414 if f in self._copymap:
416 copies[f] = self._copymap[f]
415 copies[f] = self._copymap[f]
417 self.add(f)
416 self.add(f)
418 return copies
417 return copies
419
418
420 def setbranch(self, branch):
419 def setbranch(self, branch):
421 self._branch = encoding.fromlocal(branch)
420 self._branch = encoding.fromlocal(branch)
422 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
421 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
423 try:
422 try:
424 f.write(self._branch + '\n')
423 f.write(self._branch + '\n')
425 f.close()
424 f.close()
426
425
427 # make sure filecache has the correct stat info for _branch after
426 # make sure filecache has the correct stat info for _branch after
428 # replacing the underlying file
427 # replacing the underlying file
429 ce = self._filecache['_branch']
428 ce = self._filecache['_branch']
430 if ce:
429 if ce:
431 ce.refresh()
430 ce.refresh()
432 except: # re-raises
431 except: # re-raises
433 f.discard()
432 f.discard()
434 raise
433 raise
435
434
436 def _opendirstatefile(self):
435 def _opendirstatefile(self):
437 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
436 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
438 if self._pendingmode is not None and self._pendingmode != mode:
437 if self._pendingmode is not None and self._pendingmode != mode:
439 fp.close()
438 fp.close()
440 raise error.Abort(_('working directory state may be '
439 raise error.Abort(_('working directory state may be '
441 'changed parallelly'))
440 'changed parallelly'))
442 self._pendingmode = mode
441 self._pendingmode = mode
443 return fp
442 return fp
444
443
445 def _read(self):
444 def _read(self):
446 self._map = {}
445 self._map = {}
447 self._copymap = {}
446 self._copymap = {}
448 # ignore HG_PENDING because identity is used only for writing
447 # ignore HG_PENDING because identity is used only for writing
449 self._identity = util.filestat.frompath(
448 self._identity = util.filestat.frompath(
450 self._opener.join(self._filename))
449 self._opener.join(self._filename))
451 try:
450 try:
452 fp = self._opendirstatefile()
451 fp = self._opendirstatefile()
453 try:
452 try:
454 st = fp.read()
453 st = fp.read()
455 finally:
454 finally:
456 fp.close()
455 fp.close()
457 except IOError as err:
456 except IOError as err:
458 if err.errno != errno.ENOENT:
457 if err.errno != errno.ENOENT:
459 raise
458 raise
460 return
459 return
461 if not st:
460 if not st:
462 return
461 return
463
462
464 if util.safehasattr(parsers, 'dict_new_presized'):
463 if util.safehasattr(parsers, 'dict_new_presized'):
465 # Make an estimate of the number of files in the dirstate based on
464 # Make an estimate of the number of files in the dirstate based on
466 # its size. From a linear regression on a set of real-world repos,
465 # its size. From a linear regression on a set of real-world repos,
467 # all over 10,000 files, the size of a dirstate entry is 85
466 # all over 10,000 files, the size of a dirstate entry is 85
468 # bytes. The cost of resizing is significantly higher than the cost
467 # bytes. The cost of resizing is significantly higher than the cost
469 # of filling in a larger presized dict, so subtract 20% from the
468 # of filling in a larger presized dict, so subtract 20% from the
470 # size.
469 # size.
471 #
470 #
472 # This heuristic is imperfect in many ways, so in a future dirstate
471 # This heuristic is imperfect in many ways, so in a future dirstate
473 # format update it makes sense to just record the number of entries
472 # format update it makes sense to just record the number of entries
474 # on write.
473 # on write.
475 self._map = parsers.dict_new_presized(len(st) / 71)
474 self._map = parsers.dict_new_presized(len(st) / 71)
476
475
477 # Python's garbage collector triggers a GC each time a certain number
476 # Python's garbage collector triggers a GC each time a certain number
478 # of container objects (the number being defined by
477 # of container objects (the number being defined by
479 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
478 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
480 # for each file in the dirstate. The C version then immediately marks
479 # for each file in the dirstate. The C version then immediately marks
481 # them as not to be tracked by the collector. However, this has no
480 # them as not to be tracked by the collector. However, this has no
482 # effect on when GCs are triggered, only on what objects the GC looks
481 # effect on when GCs are triggered, only on what objects the GC looks
483 # into. This means that O(number of files) GCs are unavoidable.
482 # into. This means that O(number of files) GCs are unavoidable.
484 # Depending on when in the process's lifetime the dirstate is parsed,
483 # Depending on when in the process's lifetime the dirstate is parsed,
485 # this can get very expensive. As a workaround, disable GC while
484 # this can get very expensive. As a workaround, disable GC while
486 # parsing the dirstate.
485 # parsing the dirstate.
487 #
486 #
488 # (we cannot decorate the function directly since it is in a C module)
487 # (we cannot decorate the function directly since it is in a C module)
489 parse_dirstate = util.nogc(parsers.parse_dirstate)
488 parse_dirstate = util.nogc(parsers.parse_dirstate)
490 p = parse_dirstate(self._map, self._copymap, st)
489 p = parse_dirstate(self._map, self._copymap, st)
491 if not self._dirtypl:
490 if not self._dirtypl:
492 self._pl = p
491 self._pl = p
493
492
494 def invalidate(self):
493 def invalidate(self):
495 '''Causes the next access to reread the dirstate.
494 '''Causes the next access to reread the dirstate.
496
495
497 This is different from localrepo.invalidatedirstate() because it always
496 This is different from localrepo.invalidatedirstate() because it always
498 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
497 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
499 check whether the dirstate has changed before rereading it.'''
498 check whether the dirstate has changed before rereading it.'''
500
499
501 for a in ("_map", "_copymap", "_identity",
500 for a in ("_map", "_copymap", "_identity",
502 "_filefoldmap", "_dirfoldmap", "_branch",
501 "_filefoldmap", "_dirfoldmap", "_branch",
503 "_pl", "_dirs", "_ignore", "_nonnormalset",
502 "_pl", "_dirs", "_ignore", "_nonnormalset",
504 "_otherparentset"):
503 "_otherparentset"):
505 if a in self.__dict__:
504 if a in self.__dict__:
506 delattr(self, a)
505 delattr(self, a)
507 self._lastnormaltime = 0
506 self._lastnormaltime = 0
508 self._dirty = False
507 self._dirty = False
509 self._updatedfiles.clear()
508 self._updatedfiles.clear()
510 self._parentwriters = 0
509 self._parentwriters = 0
511 self._origpl = None
510 self._origpl = None
512
511
513 def copy(self, source, dest):
512 def copy(self, source, dest):
514 """Mark dest as a copy of source. Unmark dest if source is None."""
513 """Mark dest as a copy of source. Unmark dest if source is None."""
515 if source == dest:
514 if source == dest:
516 return
515 return
517 self._dirty = True
516 self._dirty = True
518 if source is not None:
517 if source is not None:
519 self._copymap[dest] = source
518 self._copymap[dest] = source
520 self._updatedfiles.add(source)
519 self._updatedfiles.add(source)
521 self._updatedfiles.add(dest)
520 self._updatedfiles.add(dest)
522 elif dest in self._copymap:
521 elif dest in self._copymap:
523 del self._copymap[dest]
522 del self._copymap[dest]
524 self._updatedfiles.add(dest)
523 self._updatedfiles.add(dest)
525
524
526 def copied(self, file):
525 def copied(self, file):
527 return self._copymap.get(file, None)
526 return self._copymap.get(file, None)
528
527
529 def copies(self):
528 def copies(self):
530 return self._copymap
529 return self._copymap
531
530
532 def _droppath(self, f):
531 def _droppath(self, f):
533 if self[f] not in "?r" and "_dirs" in self.__dict__:
532 if self[f] not in "?r" and "_dirs" in self.__dict__:
534 self._dirs.delpath(f)
533 self._dirs.delpath(f)
535
534
536 if "_filefoldmap" in self.__dict__:
535 if "_filefoldmap" in self.__dict__:
537 normed = util.normcase(f)
536 normed = util.normcase(f)
538 if normed in self._filefoldmap:
537 if normed in self._filefoldmap:
539 del self._filefoldmap[normed]
538 del self._filefoldmap[normed]
540
539
541 self._updatedfiles.add(f)
540 self._updatedfiles.add(f)
542
541
543 def _addpath(self, f, state, mode, size, mtime):
542 def _addpath(self, f, state, mode, size, mtime):
544 oldstate = self[f]
543 oldstate = self[f]
545 if state == 'a' or oldstate == 'r':
544 if state == 'a' or oldstate == 'r':
546 scmutil.checkfilename(f)
545 scmutil.checkfilename(f)
547 if f in self._dirs:
546 if f in self._dirs:
548 raise error.Abort(_('directory %r already in dirstate') % f)
547 raise error.Abort(_('directory %r already in dirstate') % f)
549 # shadows
548 # shadows
550 for d in util.finddirs(f):
549 for d in util.finddirs(f):
551 if d in self._dirs:
550 if d in self._dirs:
552 break
551 break
553 if d in self._map and self[d] != 'r':
552 if d in self._map and self[d] != 'r':
554 raise error.Abort(
553 raise error.Abort(
555 _('file %r in dirstate clashes with %r') % (d, f))
554 _('file %r in dirstate clashes with %r') % (d, f))
556 if oldstate in "?r" and "_dirs" in self.__dict__:
555 if oldstate in "?r" and "_dirs" in self.__dict__:
557 self._dirs.addpath(f)
556 self._dirs.addpath(f)
558 self._dirty = True
557 self._dirty = True
559 self._updatedfiles.add(f)
558 self._updatedfiles.add(f)
560 self._map[f] = dirstatetuple(state, mode, size, mtime)
559 self._map[f] = dirstatetuple(state, mode, size, mtime)
561 if state != 'n' or mtime == -1:
560 if state != 'n' or mtime == -1:
562 self._nonnormalset.add(f)
561 self._nonnormalset.add(f)
563 if size == -2:
562 if size == -2:
564 self._otherparentset.add(f)
563 self._otherparentset.add(f)
565
564
566 def normal(self, f):
565 def normal(self, f):
567 '''Mark a file normal and clean.'''
566 '''Mark a file normal and clean.'''
568 s = os.lstat(self._join(f))
567 s = os.lstat(self._join(f))
569 mtime = s.st_mtime
568 mtime = s.st_mtime
570 self._addpath(f, 'n', s.st_mode,
569 self._addpath(f, 'n', s.st_mode,
571 s.st_size & _rangemask, mtime & _rangemask)
570 s.st_size & _rangemask, mtime & _rangemask)
572 if f in self._copymap:
571 if f in self._copymap:
573 del self._copymap[f]
572 del self._copymap[f]
574 if f in self._nonnormalset:
573 if f in self._nonnormalset:
575 self._nonnormalset.remove(f)
574 self._nonnormalset.remove(f)
576 if mtime > self._lastnormaltime:
575 if mtime > self._lastnormaltime:
577 # Remember the most recent modification timeslot for status(),
576 # Remember the most recent modification timeslot for status(),
578 # to make sure we won't miss future size-preserving file content
577 # to make sure we won't miss future size-preserving file content
579 # modifications that happen within the same timeslot.
578 # modifications that happen within the same timeslot.
580 self._lastnormaltime = mtime
579 self._lastnormaltime = mtime
581
580
582 def normallookup(self, f):
581 def normallookup(self, f):
583 '''Mark a file normal, but possibly dirty.'''
582 '''Mark a file normal, but possibly dirty.'''
584 if self._pl[1] != nullid and f in self._map:
583 if self._pl[1] != nullid and f in self._map:
585 # if there is a merge going on and the file was either
584 # if there is a merge going on and the file was either
586 # in state 'm' (-1) or coming from other parent (-2) before
585 # in state 'm' (-1) or coming from other parent (-2) before
587 # being removed, restore that state.
586 # being removed, restore that state.
588 entry = self._map[f]
587 entry = self._map[f]
589 if entry[0] == 'r' and entry[2] in (-1, -2):
588 if entry[0] == 'r' and entry[2] in (-1, -2):
590 source = self._copymap.get(f)
589 source = self._copymap.get(f)
591 if entry[2] == -1:
590 if entry[2] == -1:
592 self.merge(f)
591 self.merge(f)
593 elif entry[2] == -2:
592 elif entry[2] == -2:
594 self.otherparent(f)
593 self.otherparent(f)
595 if source:
594 if source:
596 self.copy(source, f)
595 self.copy(source, f)
597 return
596 return
598 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
597 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
599 return
598 return
600 self._addpath(f, 'n', 0, -1, -1)
599 self._addpath(f, 'n', 0, -1, -1)
601 if f in self._copymap:
600 if f in self._copymap:
602 del self._copymap[f]
601 del self._copymap[f]
603 if f in self._nonnormalset:
602 if f in self._nonnormalset:
604 self._nonnormalset.remove(f)
603 self._nonnormalset.remove(f)
605
604
606 def otherparent(self, f):
605 def otherparent(self, f):
607 '''Mark as coming from the other parent, always dirty.'''
606 '''Mark as coming from the other parent, always dirty.'''
608 if self._pl[1] == nullid:
607 if self._pl[1] == nullid:
609 raise error.Abort(_("setting %r to other parent "
608 raise error.Abort(_("setting %r to other parent "
610 "only allowed in merges") % f)
609 "only allowed in merges") % f)
611 if f in self and self[f] == 'n':
610 if f in self and self[f] == 'n':
612 # merge-like
611 # merge-like
613 self._addpath(f, 'm', 0, -2, -1)
612 self._addpath(f, 'm', 0, -2, -1)
614 else:
613 else:
615 # add-like
614 # add-like
616 self._addpath(f, 'n', 0, -2, -1)
615 self._addpath(f, 'n', 0, -2, -1)
617
616
618 if f in self._copymap:
617 if f in self._copymap:
619 del self._copymap[f]
618 del self._copymap[f]
620
619
621 def add(self, f):
620 def add(self, f):
622 '''Mark a file added.'''
621 '''Mark a file added.'''
623 self._addpath(f, 'a', 0, -1, -1)
622 self._addpath(f, 'a', 0, -1, -1)
624 if f in self._copymap:
623 if f in self._copymap:
625 del self._copymap[f]
624 del self._copymap[f]
626
625
627 def remove(self, f):
626 def remove(self, f):
628 '''Mark a file removed.'''
627 '''Mark a file removed.'''
629 self._dirty = True
628 self._dirty = True
630 self._droppath(f)
629 self._droppath(f)
631 size = 0
630 size = 0
632 if self._pl[1] != nullid and f in self._map:
631 if self._pl[1] != nullid and f in self._map:
633 # backup the previous state
632 # backup the previous state
634 entry = self._map[f]
633 entry = self._map[f]
635 if entry[0] == 'm': # merge
634 if entry[0] == 'm': # merge
636 size = -1
635 size = -1
637 elif entry[0] == 'n' and entry[2] == -2: # other parent
636 elif entry[0] == 'n' and entry[2] == -2: # other parent
638 size = -2
637 size = -2
639 self._otherparentset.add(f)
638 self._otherparentset.add(f)
640 self._map[f] = dirstatetuple('r', 0, size, 0)
639 self._map[f] = dirstatetuple('r', 0, size, 0)
641 self._nonnormalset.add(f)
640 self._nonnormalset.add(f)
642 if size == 0 and f in self._copymap:
641 if size == 0 and f in self._copymap:
643 del self._copymap[f]
642 del self._copymap[f]
644
643
645 def merge(self, f):
644 def merge(self, f):
646 '''Mark a file merged.'''
645 '''Mark a file merged.'''
647 if self._pl[1] == nullid:
646 if self._pl[1] == nullid:
648 return self.normallookup(f)
647 return self.normallookup(f)
649 return self.otherparent(f)
648 return self.otherparent(f)
650
649
651 def drop(self, f):
650 def drop(self, f):
652 '''Drop a file from the dirstate'''
651 '''Drop a file from the dirstate'''
653 if f in self._map:
652 if f in self._map:
654 self._dirty = True
653 self._dirty = True
655 self._droppath(f)
654 self._droppath(f)
656 del self._map[f]
655 del self._map[f]
657 if f in self._nonnormalset:
656 if f in self._nonnormalset:
658 self._nonnormalset.remove(f)
657 self._nonnormalset.remove(f)
659 if f in self._copymap:
658 if f in self._copymap:
660 del self._copymap[f]
659 del self._copymap[f]
661
660
662 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
661 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
663 if exists is None:
662 if exists is None:
664 exists = os.path.lexists(os.path.join(self._root, path))
663 exists = os.path.lexists(os.path.join(self._root, path))
665 if not exists:
664 if not exists:
666 # Maybe a path component exists
665 # Maybe a path component exists
667 if not ignoremissing and '/' in path:
666 if not ignoremissing and '/' in path:
668 d, f = path.rsplit('/', 1)
667 d, f = path.rsplit('/', 1)
669 d = self._normalize(d, False, ignoremissing, None)
668 d = self._normalize(d, False, ignoremissing, None)
670 folded = d + "/" + f
669 folded = d + "/" + f
671 else:
670 else:
672 # No path components, preserve original case
671 # No path components, preserve original case
673 folded = path
672 folded = path
674 else:
673 else:
675 # recursively normalize leading directory components
674 # recursively normalize leading directory components
676 # against dirstate
675 # against dirstate
677 if '/' in normed:
676 if '/' in normed:
678 d, f = normed.rsplit('/', 1)
677 d, f = normed.rsplit('/', 1)
679 d = self._normalize(d, False, ignoremissing, True)
678 d = self._normalize(d, False, ignoremissing, True)
680 r = self._root + "/" + d
679 r = self._root + "/" + d
681 folded = d + "/" + util.fspath(f, r)
680 folded = d + "/" + util.fspath(f, r)
682 else:
681 else:
683 folded = util.fspath(normed, self._root)
682 folded = util.fspath(normed, self._root)
684 storemap[normed] = folded
683 storemap[normed] = folded
685
684
686 return folded
685 return folded
687
686
688 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
687 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
689 normed = util.normcase(path)
688 normed = util.normcase(path)
690 folded = self._filefoldmap.get(normed, None)
689 folded = self._filefoldmap.get(normed, None)
691 if folded is None:
690 if folded is None:
692 if isknown:
691 if isknown:
693 folded = path
692 folded = path
694 else:
693 else:
695 folded = self._discoverpath(path, normed, ignoremissing, exists,
694 folded = self._discoverpath(path, normed, ignoremissing, exists,
696 self._filefoldmap)
695 self._filefoldmap)
697 return folded
696 return folded
698
697
699 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
698 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
700 normed = util.normcase(path)
699 normed = util.normcase(path)
701 folded = self._filefoldmap.get(normed, None)
700 folded = self._filefoldmap.get(normed, None)
702 if folded is None:
701 if folded is None:
703 folded = self._dirfoldmap.get(normed, None)
702 folded = self._dirfoldmap.get(normed, None)
704 if folded is None:
703 if folded is None:
705 if isknown:
704 if isknown:
706 folded = path
705 folded = path
707 else:
706 else:
708 # store discovered result in dirfoldmap so that future
707 # store discovered result in dirfoldmap so that future
709 # normalizefile calls don't start matching directories
708 # normalizefile calls don't start matching directories
710 folded = self._discoverpath(path, normed, ignoremissing, exists,
709 folded = self._discoverpath(path, normed, ignoremissing, exists,
711 self._dirfoldmap)
710 self._dirfoldmap)
712 return folded
711 return folded
713
712
714 def normalize(self, path, isknown=False, ignoremissing=False):
713 def normalize(self, path, isknown=False, ignoremissing=False):
715 '''
714 '''
716 normalize the case of a pathname when on a casefolding filesystem
715 normalize the case of a pathname when on a casefolding filesystem
717
716
718 isknown specifies whether the filename came from walking the
717 isknown specifies whether the filename came from walking the
719 disk, to avoid extra filesystem access.
718 disk, to avoid extra filesystem access.
720
719
721 If ignoremissing is True, missing path are returned
720 If ignoremissing is True, missing path are returned
722 unchanged. Otherwise, we try harder to normalize possibly
721 unchanged. Otherwise, we try harder to normalize possibly
723 existing path components.
722 existing path components.
724
723
725 The normalized case is determined based on the following precedence:
724 The normalized case is determined based on the following precedence:
726
725
727 - version of name already stored in the dirstate
726 - version of name already stored in the dirstate
728 - version of name stored on disk
727 - version of name stored on disk
729 - version provided via command arguments
728 - version provided via command arguments
730 '''
729 '''
731
730
732 if self._checkcase:
731 if self._checkcase:
733 return self._normalize(path, isknown, ignoremissing)
732 return self._normalize(path, isknown, ignoremissing)
734 return path
733 return path
735
734
736 def clear(self):
735 def clear(self):
737 self._map = {}
736 self._map = {}
738 self._nonnormalset = set()
737 self._nonnormalset = set()
739 self._otherparentset = set()
738 self._otherparentset = set()
740 if "_dirs" in self.__dict__:
739 if "_dirs" in self.__dict__:
741 delattr(self, "_dirs")
740 delattr(self, "_dirs")
742 self._copymap = {}
741 self._copymap = {}
743 self._pl = [nullid, nullid]
742 self._pl = [nullid, nullid]
744 self._lastnormaltime = 0
743 self._lastnormaltime = 0
745 self._updatedfiles.clear()
744 self._updatedfiles.clear()
746 self._dirty = True
745 self._dirty = True
747
746
748 def rebuild(self, parent, allfiles, changedfiles=None):
747 def rebuild(self, parent, allfiles, changedfiles=None):
749 if changedfiles is None:
748 if changedfiles is None:
750 # Rebuild entire dirstate
749 # Rebuild entire dirstate
751 changedfiles = allfiles
750 changedfiles = allfiles
752 lastnormaltime = self._lastnormaltime
751 lastnormaltime = self._lastnormaltime
753 self.clear()
752 self.clear()
754 self._lastnormaltime = lastnormaltime
753 self._lastnormaltime = lastnormaltime
755
754
756 if self._origpl is None:
755 if self._origpl is None:
757 self._origpl = self._pl
756 self._origpl = self._pl
758 self._pl = (parent, nullid)
757 self._pl = (parent, nullid)
759 for f in changedfiles:
758 for f in changedfiles:
760 if f in allfiles:
759 if f in allfiles:
761 self.normallookup(f)
760 self.normallookup(f)
762 else:
761 else:
763 self.drop(f)
762 self.drop(f)
764
763
765 self._dirty = True
764 self._dirty = True
766
765
767 def identity(self):
766 def identity(self):
768 '''Return identity of dirstate itself to detect changing in storage
767 '''Return identity of dirstate itself to detect changing in storage
769
768
770 If identity of previous dirstate is equal to this, writing
769 If identity of previous dirstate is equal to this, writing
771 changes based on the former dirstate out can keep consistency.
770 changes based on the former dirstate out can keep consistency.
772 '''
771 '''
773 return self._identity
772 return self._identity
774
773
775 def write(self, tr):
774 def write(self, tr):
776 if not self._dirty:
775 if not self._dirty:
777 return
776 return
778
777
779 filename = self._filename
778 filename = self._filename
780 if tr:
779 if tr:
781 # 'dirstate.write()' is not only for writing in-memory
780 # 'dirstate.write()' is not only for writing in-memory
782 # changes out, but also for dropping ambiguous timestamp.
781 # changes out, but also for dropping ambiguous timestamp.
783 # delayed writing re-raise "ambiguous timestamp issue".
782 # delayed writing re-raise "ambiguous timestamp issue".
784 # See also the wiki page below for detail:
783 # See also the wiki page below for detail:
785 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
784 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
786
785
787 # emulate dropping timestamp in 'parsers.pack_dirstate'
786 # emulate dropping timestamp in 'parsers.pack_dirstate'
788 now = _getfsnow(self._opener)
787 now = _getfsnow(self._opener)
789 dmap = self._map
788 dmap = self._map
790 for f in self._updatedfiles:
789 for f in self._updatedfiles:
791 e = dmap.get(f)
790 e = dmap.get(f)
792 if e is not None and e[0] == 'n' and e[3] == now:
791 if e is not None and e[0] == 'n' and e[3] == now:
793 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
792 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
794 self._nonnormalset.add(f)
793 self._nonnormalset.add(f)
795
794
796 # emulate that all 'dirstate.normal' results are written out
795 # emulate that all 'dirstate.normal' results are written out
797 self._lastnormaltime = 0
796 self._lastnormaltime = 0
798 self._updatedfiles.clear()
797 self._updatedfiles.clear()
799
798
800 # delay writing in-memory changes out
799 # delay writing in-memory changes out
801 tr.addfilegenerator('dirstate', (self._filename,),
800 tr.addfilegenerator('dirstate', (self._filename,),
802 self._writedirstate, location='plain')
801 self._writedirstate, location='plain')
803 return
802 return
804
803
805 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
804 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
806 self._writedirstate(st)
805 self._writedirstate(st)
807
806
808 def addparentchangecallback(self, category, callback):
807 def addparentchangecallback(self, category, callback):
809 """add a callback to be called when the wd parents are changed
808 """add a callback to be called when the wd parents are changed
810
809
811 Callback will be called with the following arguments:
810 Callback will be called with the following arguments:
812 dirstate, (oldp1, oldp2), (newp1, newp2)
811 dirstate, (oldp1, oldp2), (newp1, newp2)
813
812
814 Category is a unique identifier to allow overwriting an old callback
813 Category is a unique identifier to allow overwriting an old callback
815 with a newer callback.
814 with a newer callback.
816 """
815 """
817 self._plchangecallbacks[category] = callback
816 self._plchangecallbacks[category] = callback
818
817
819 def _writedirstate(self, st):
818 def _writedirstate(self, st):
820 # notify callbacks about parents change
819 # notify callbacks about parents change
821 if self._origpl is not None and self._origpl != self._pl:
820 if self._origpl is not None and self._origpl != self._pl:
822 for c, callback in sorted(self._plchangecallbacks.iteritems()):
821 for c, callback in sorted(self._plchangecallbacks.iteritems()):
823 callback(self, self._origpl, self._pl)
822 callback(self, self._origpl, self._pl)
824 self._origpl = None
823 self._origpl = None
825 # use the modification time of the newly created temporary file as the
824 # use the modification time of the newly created temporary file as the
826 # filesystem's notion of 'now'
825 # filesystem's notion of 'now'
827 now = util.fstat(st).st_mtime & _rangemask
826 now = util.fstat(st).st_mtime & _rangemask
828
827
829 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
828 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
830 # timestamp of each entries in dirstate, because of 'now > mtime'
829 # timestamp of each entries in dirstate, because of 'now > mtime'
831 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
830 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
832 if delaywrite > 0:
831 if delaywrite > 0:
833 # do we have any files to delay for?
832 # do we have any files to delay for?
834 for f, e in self._map.iteritems():
833 for f, e in self._map.iteritems():
835 if e[0] == 'n' and e[3] == now:
834 if e[0] == 'n' and e[3] == now:
836 import time # to avoid useless import
835 import time # to avoid useless import
837 # rather than sleep n seconds, sleep until the next
836 # rather than sleep n seconds, sleep until the next
838 # multiple of n seconds
837 # multiple of n seconds
839 clock = time.time()
838 clock = time.time()
840 start = int(clock) - (int(clock) % delaywrite)
839 start = int(clock) - (int(clock) % delaywrite)
841 end = start + delaywrite
840 end = start + delaywrite
842 time.sleep(end - clock)
841 time.sleep(end - clock)
843 now = end # trust our estimate that the end is near now
842 now = end # trust our estimate that the end is near now
844 break
843 break
845
844
846 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
845 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
847 self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
846 self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
848 st.close()
847 st.close()
849 self._lastnormaltime = 0
848 self._lastnormaltime = 0
850 self._dirty = self._dirtypl = False
849 self._dirty = self._dirtypl = False
851
850
852 def _dirignore(self, f):
851 def _dirignore(self, f):
853 if f == '.':
852 if f == '.':
854 return False
853 return False
855 if self._ignore(f):
854 if self._ignore(f):
856 return True
855 return True
857 for p in util.finddirs(f):
856 for p in util.finddirs(f):
858 if self._ignore(p):
857 if self._ignore(p):
859 return True
858 return True
860 return False
859 return False
861
860
862 def _ignorefiles(self):
861 def _ignorefiles(self):
863 files = []
862 files = []
864 if os.path.exists(self._join('.hgignore')):
863 if os.path.exists(self._join('.hgignore')):
865 files.append(self._join('.hgignore'))
864 files.append(self._join('.hgignore'))
866 for name, path in self._ui.configitems("ui"):
865 for name, path in self._ui.configitems("ui"):
867 if name == 'ignore' or name.startswith('ignore.'):
866 if name == 'ignore' or name.startswith('ignore.'):
868 # we need to use os.path.join here rather than self._join
867 # we need to use os.path.join here rather than self._join
869 # because path is arbitrary and user-specified
868 # because path is arbitrary and user-specified
870 files.append(os.path.join(self._rootdir, util.expandpath(path)))
869 files.append(os.path.join(self._rootdir, util.expandpath(path)))
871 return files
870 return files
872
871
873 def _ignorefileandline(self, f):
872 def _ignorefileandline(self, f):
874 files = collections.deque(self._ignorefiles())
873 files = collections.deque(self._ignorefiles())
875 visited = set()
874 visited = set()
876 while files:
875 while files:
877 i = files.popleft()
876 i = files.popleft()
878 patterns = matchmod.readpatternfile(i, self._ui.warn,
877 patterns = matchmod.readpatternfile(i, self._ui.warn,
879 sourceinfo=True)
878 sourceinfo=True)
880 for pattern, lineno, line in patterns:
879 for pattern, lineno, line in patterns:
881 kind, p = matchmod._patsplit(pattern, 'glob')
880 kind, p = matchmod._patsplit(pattern, 'glob')
882 if kind == "subinclude":
881 if kind == "subinclude":
883 if p not in visited:
882 if p not in visited:
884 files.append(p)
883 files.append(p)
885 continue
884 continue
886 m = matchmod.match(self._root, '', [], [pattern],
885 m = matchmod.match(self._root, '', [], [pattern],
887 warn=self._ui.warn)
886 warn=self._ui.warn)
888 if m(f):
887 if m(f):
889 return (i, lineno, line)
888 return (i, lineno, line)
890 visited.add(i)
889 visited.add(i)
891 return (None, -1, "")
890 return (None, -1, "")
892
891
893 def _walkexplicit(self, match, subrepos):
892 def _walkexplicit(self, match, subrepos):
894 '''Get stat data about the files explicitly specified by match.
893 '''Get stat data about the files explicitly specified by match.
895
894
896 Return a triple (results, dirsfound, dirsnotfound).
895 Return a triple (results, dirsfound, dirsnotfound).
897 - results is a mapping from filename to stat result. It also contains
896 - results is a mapping from filename to stat result. It also contains
898 listings mapping subrepos and .hg to None.
897 listings mapping subrepos and .hg to None.
899 - dirsfound is a list of files found to be directories.
898 - dirsfound is a list of files found to be directories.
900 - dirsnotfound is a list of files that the dirstate thinks are
899 - dirsnotfound is a list of files that the dirstate thinks are
901 directories and that were not found.'''
900 directories and that were not found.'''
902
901
903 def badtype(mode):
902 def badtype(mode):
904 kind = _('unknown')
903 kind = _('unknown')
905 if stat.S_ISCHR(mode):
904 if stat.S_ISCHR(mode):
906 kind = _('character device')
905 kind = _('character device')
907 elif stat.S_ISBLK(mode):
906 elif stat.S_ISBLK(mode):
908 kind = _('block device')
907 kind = _('block device')
909 elif stat.S_ISFIFO(mode):
908 elif stat.S_ISFIFO(mode):
910 kind = _('fifo')
909 kind = _('fifo')
911 elif stat.S_ISSOCK(mode):
910 elif stat.S_ISSOCK(mode):
912 kind = _('socket')
911 kind = _('socket')
913 elif stat.S_ISDIR(mode):
912 elif stat.S_ISDIR(mode):
914 kind = _('directory')
913 kind = _('directory')
915 return _('unsupported file type (type is %s)') % kind
914 return _('unsupported file type (type is %s)') % kind
916
915
917 matchedir = match.explicitdir
916 matchedir = match.explicitdir
918 badfn = match.bad
917 badfn = match.bad
919 dmap = self._map
918 dmap = self._map
920 lstat = os.lstat
919 lstat = os.lstat
921 getkind = stat.S_IFMT
920 getkind = stat.S_IFMT
922 dirkind = stat.S_IFDIR
921 dirkind = stat.S_IFDIR
923 regkind = stat.S_IFREG
922 regkind = stat.S_IFREG
924 lnkkind = stat.S_IFLNK
923 lnkkind = stat.S_IFLNK
925 join = self._join
924 join = self._join
926 dirsfound = []
925 dirsfound = []
927 foundadd = dirsfound.append
926 foundadd = dirsfound.append
928 dirsnotfound = []
927 dirsnotfound = []
929 notfoundadd = dirsnotfound.append
928 notfoundadd = dirsnotfound.append
930
929
931 if not match.isexact() and self._checkcase:
930 if not match.isexact() and self._checkcase:
932 normalize = self._normalize
931 normalize = self._normalize
933 else:
932 else:
934 normalize = None
933 normalize = None
935
934
936 files = sorted(match.files())
935 files = sorted(match.files())
937 subrepos.sort()
936 subrepos.sort()
938 i, j = 0, 0
937 i, j = 0, 0
939 while i < len(files) and j < len(subrepos):
938 while i < len(files) and j < len(subrepos):
940 subpath = subrepos[j] + "/"
939 subpath = subrepos[j] + "/"
941 if files[i] < subpath:
940 if files[i] < subpath:
942 i += 1
941 i += 1
943 continue
942 continue
944 while i < len(files) and files[i].startswith(subpath):
943 while i < len(files) and files[i].startswith(subpath):
945 del files[i]
944 del files[i]
946 j += 1
945 j += 1
947
946
948 if not files or '.' in files:
947 if not files or '.' in files:
949 files = ['.']
948 files = ['.']
950 results = dict.fromkeys(subrepos)
949 results = dict.fromkeys(subrepos)
951 results['.hg'] = None
950 results['.hg'] = None
952
951
953 alldirs = None
952 alldirs = None
954 for ff in files:
953 for ff in files:
955 # constructing the foldmap is expensive, so don't do it for the
954 # constructing the foldmap is expensive, so don't do it for the
956 # common case where files is ['.']
955 # common case where files is ['.']
957 if normalize and ff != '.':
956 if normalize and ff != '.':
958 nf = normalize(ff, False, True)
957 nf = normalize(ff, False, True)
959 else:
958 else:
960 nf = ff
959 nf = ff
961 if nf in results:
960 if nf in results:
962 continue
961 continue
963
962
964 try:
963 try:
965 st = lstat(join(nf))
964 st = lstat(join(nf))
966 kind = getkind(st.st_mode)
965 kind = getkind(st.st_mode)
967 if kind == dirkind:
966 if kind == dirkind:
968 if nf in dmap:
967 if nf in dmap:
969 # file replaced by dir on disk but still in dirstate
968 # file replaced by dir on disk but still in dirstate
970 results[nf] = None
969 results[nf] = None
971 if matchedir:
970 if matchedir:
972 matchedir(nf)
971 matchedir(nf)
973 foundadd((nf, ff))
972 foundadd((nf, ff))
974 elif kind == regkind or kind == lnkkind:
973 elif kind == regkind or kind == lnkkind:
975 results[nf] = st
974 results[nf] = st
976 else:
975 else:
977 badfn(ff, badtype(kind))
976 badfn(ff, badtype(kind))
978 if nf in dmap:
977 if nf in dmap:
979 results[nf] = None
978 results[nf] = None
980 except OSError as inst: # nf not found on disk - it is dirstate only
979 except OSError as inst: # nf not found on disk - it is dirstate only
981 if nf in dmap: # does it exactly match a missing file?
980 if nf in dmap: # does it exactly match a missing file?
982 results[nf] = None
981 results[nf] = None
983 else: # does it match a missing directory?
982 else: # does it match a missing directory?
984 if alldirs is None:
983 if alldirs is None:
985 alldirs = util.dirs(dmap)
984 alldirs = util.dirs(dmap)
986 if nf in alldirs:
985 if nf in alldirs:
987 if matchedir:
986 if matchedir:
988 matchedir(nf)
987 matchedir(nf)
989 notfoundadd(nf)
988 notfoundadd(nf)
990 else:
989 else:
991 badfn(ff, inst.strerror)
990 badfn(ff, inst.strerror)
992
991
993 # Case insensitive filesystems cannot rely on lstat() failing to detect
992 # Case insensitive filesystems cannot rely on lstat() failing to detect
994 # a case-only rename. Prune the stat object for any file that does not
993 # a case-only rename. Prune the stat object for any file that does not
995 # match the case in the filesystem, if there are multiple files that
994 # match the case in the filesystem, if there are multiple files that
996 # normalize to the same path.
995 # normalize to the same path.
997 if match.isexact() and self._checkcase:
996 if match.isexact() and self._checkcase:
998 normed = {}
997 normed = {}
999
998
1000 for f, st in results.iteritems():
999 for f, st in results.iteritems():
1001 if st is None:
1000 if st is None:
1002 continue
1001 continue
1003
1002
1004 nc = util.normcase(f)
1003 nc = util.normcase(f)
1005 paths = normed.get(nc)
1004 paths = normed.get(nc)
1006
1005
1007 if paths is None:
1006 if paths is None:
1008 paths = set()
1007 paths = set()
1009 normed[nc] = paths
1008 normed[nc] = paths
1010
1009
1011 paths.add(f)
1010 paths.add(f)
1012
1011
1013 for norm, paths in normed.iteritems():
1012 for norm, paths in normed.iteritems():
1014 if len(paths) > 1:
1013 if len(paths) > 1:
1015 for path in paths:
1014 for path in paths:
1016 folded = self._discoverpath(path, norm, True, None,
1015 folded = self._discoverpath(path, norm, True, None,
1017 self._dirfoldmap)
1016 self._dirfoldmap)
1018 if path != folded:
1017 if path != folded:
1019 results[path] = None
1018 results[path] = None
1020
1019
1021 return results, dirsfound, dirsnotfound
1020 return results, dirsfound, dirsnotfound
1022
1021
1023 def walk(self, match, subrepos, unknown, ignored, full=True):
1022 def walk(self, match, subrepos, unknown, ignored, full=True):
1024 '''
1023 '''
1025 Walk recursively through the directory tree, finding all files
1024 Walk recursively through the directory tree, finding all files
1026 matched by match.
1025 matched by match.
1027
1026
1028 If full is False, maybe skip some known-clean files.
1027 If full is False, maybe skip some known-clean files.
1029
1028
1030 Return a dict mapping filename to stat-like object (either
1029 Return a dict mapping filename to stat-like object (either
1031 mercurial.osutil.stat instance or return value of os.stat()).
1030 mercurial.osutil.stat instance or return value of os.stat()).
1032
1031
1033 '''
1032 '''
1034 # full is a flag that extensions that hook into walk can use -- this
1033 # full is a flag that extensions that hook into walk can use -- this
1035 # implementation doesn't use it at all. This satisfies the contract
1034 # implementation doesn't use it at all. This satisfies the contract
1036 # because we only guarantee a "maybe".
1035 # because we only guarantee a "maybe".
1037
1036
1038 if ignored:
1037 if ignored:
1039 ignore = util.never
1038 ignore = util.never
1040 dirignore = util.never
1039 dirignore = util.never
1041 elif unknown:
1040 elif unknown:
1042 ignore = self._ignore
1041 ignore = self._ignore
1043 dirignore = self._dirignore
1042 dirignore = self._dirignore
1044 else:
1043 else:
1045 # if not unknown and not ignored, drop dir recursion and step 2
1044 # if not unknown and not ignored, drop dir recursion and step 2
1046 ignore = util.always
1045 ignore = util.always
1047 dirignore = util.always
1046 dirignore = util.always
1048
1047
1049 matchfn = match.matchfn
1048 matchfn = match.matchfn
1050 matchalways = match.always()
1049 matchalways = match.always()
1051 matchtdir = match.traversedir
1050 matchtdir = match.traversedir
1052 dmap = self._map
1051 dmap = self._map
1053 listdir = util.listdir
1052 listdir = util.listdir
1054 lstat = os.lstat
1053 lstat = os.lstat
1055 dirkind = stat.S_IFDIR
1054 dirkind = stat.S_IFDIR
1056 regkind = stat.S_IFREG
1055 regkind = stat.S_IFREG
1057 lnkkind = stat.S_IFLNK
1056 lnkkind = stat.S_IFLNK
1058 join = self._join
1057 join = self._join
1059
1058
1060 exact = skipstep3 = False
1059 exact = skipstep3 = False
1061 if match.isexact(): # match.exact
1060 if match.isexact(): # match.exact
1062 exact = True
1061 exact = True
1063 dirignore = util.always # skip step 2
1062 dirignore = util.always # skip step 2
1064 elif match.prefix(): # match.match, no patterns
1063 elif match.prefix(): # match.match, no patterns
1065 skipstep3 = True
1064 skipstep3 = True
1066
1065
1067 if not exact and self._checkcase:
1066 if not exact and self._checkcase:
1068 normalize = self._normalize
1067 normalize = self._normalize
1069 normalizefile = self._normalizefile
1068 normalizefile = self._normalizefile
1070 skipstep3 = False
1069 skipstep3 = False
1071 else:
1070 else:
1072 normalize = self._normalize
1071 normalize = self._normalize
1073 normalizefile = None
1072 normalizefile = None
1074
1073
1075 # step 1: find all explicit files
1074 # step 1: find all explicit files
1076 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1075 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1077
1076
1078 skipstep3 = skipstep3 and not (work or dirsnotfound)
1077 skipstep3 = skipstep3 and not (work or dirsnotfound)
1079 work = [d for d in work if not dirignore(d[0])]
1078 work = [d for d in work if not dirignore(d[0])]
1080
1079
1081 # step 2: visit subdirectories
1080 # step 2: visit subdirectories
1082 def traverse(work, alreadynormed):
1081 def traverse(work, alreadynormed):
1083 wadd = work.append
1082 wadd = work.append
1084 while work:
1083 while work:
1085 nd = work.pop()
1084 nd = work.pop()
1086 if not match.visitdir(nd):
1085 if not match.visitdir(nd):
1087 continue
1086 continue
1088 skip = None
1087 skip = None
1089 if nd == '.':
1088 if nd == '.':
1090 nd = ''
1089 nd = ''
1091 else:
1090 else:
1092 skip = '.hg'
1091 skip = '.hg'
1093 try:
1092 try:
1094 entries = listdir(join(nd), stat=True, skip=skip)
1093 entries = listdir(join(nd), stat=True, skip=skip)
1095 except OSError as inst:
1094 except OSError as inst:
1096 if inst.errno in (errno.EACCES, errno.ENOENT):
1095 if inst.errno in (errno.EACCES, errno.ENOENT):
1097 match.bad(self.pathto(nd), inst.strerror)
1096 match.bad(self.pathto(nd), inst.strerror)
1098 continue
1097 continue
1099 raise
1098 raise
1100 for f, kind, st in entries:
1099 for f, kind, st in entries:
1101 if normalizefile:
1100 if normalizefile:
1102 # even though f might be a directory, we're only
1101 # even though f might be a directory, we're only
1103 # interested in comparing it to files currently in the
1102 # interested in comparing it to files currently in the
1104 # dmap -- therefore normalizefile is enough
1103 # dmap -- therefore normalizefile is enough
1105 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1104 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1106 True)
1105 True)
1107 else:
1106 else:
1108 nf = nd and (nd + "/" + f) or f
1107 nf = nd and (nd + "/" + f) or f
1109 if nf not in results:
1108 if nf not in results:
1110 if kind == dirkind:
1109 if kind == dirkind:
1111 if not ignore(nf):
1110 if not ignore(nf):
1112 if matchtdir:
1111 if matchtdir:
1113 matchtdir(nf)
1112 matchtdir(nf)
1114 wadd(nf)
1113 wadd(nf)
1115 if nf in dmap and (matchalways or matchfn(nf)):
1114 if nf in dmap and (matchalways or matchfn(nf)):
1116 results[nf] = None
1115 results[nf] = None
1117 elif kind == regkind or kind == lnkkind:
1116 elif kind == regkind or kind == lnkkind:
1118 if nf in dmap:
1117 if nf in dmap:
1119 if matchalways or matchfn(nf):
1118 if matchalways or matchfn(nf):
1120 results[nf] = st
1119 results[nf] = st
1121 elif ((matchalways or matchfn(nf))
1120 elif ((matchalways or matchfn(nf))
1122 and not ignore(nf)):
1121 and not ignore(nf)):
1123 # unknown file -- normalize if necessary
1122 # unknown file -- normalize if necessary
1124 if not alreadynormed:
1123 if not alreadynormed:
1125 nf = normalize(nf, False, True)
1124 nf = normalize(nf, False, True)
1126 results[nf] = st
1125 results[nf] = st
1127 elif nf in dmap and (matchalways or matchfn(nf)):
1126 elif nf in dmap and (matchalways or matchfn(nf)):
1128 results[nf] = None
1127 results[nf] = None
1129
1128
1130 for nd, d in work:
1129 for nd, d in work:
1131 # alreadynormed means that processwork doesn't have to do any
1130 # alreadynormed means that processwork doesn't have to do any
1132 # expensive directory normalization
1131 # expensive directory normalization
1133 alreadynormed = not normalize or nd == d
1132 alreadynormed = not normalize or nd == d
1134 traverse([d], alreadynormed)
1133 traverse([d], alreadynormed)
1135
1134
1136 for s in subrepos:
1135 for s in subrepos:
1137 del results[s]
1136 del results[s]
1138 del results['.hg']
1137 del results['.hg']
1139
1138
1140 # step 3: visit remaining files from dmap
1139 # step 3: visit remaining files from dmap
1141 if not skipstep3 and not exact:
1140 if not skipstep3 and not exact:
1142 # If a dmap file is not in results yet, it was either
1141 # If a dmap file is not in results yet, it was either
1143 # a) not matching matchfn b) ignored, c) missing, or d) under a
1142 # a) not matching matchfn b) ignored, c) missing, or d) under a
1144 # symlink directory.
1143 # symlink directory.
1145 if not results and matchalways:
1144 if not results and matchalways:
1146 visit = [f for f in dmap]
1145 visit = [f for f in dmap]
1147 else:
1146 else:
1148 visit = [f for f in dmap if f not in results and matchfn(f)]
1147 visit = [f for f in dmap if f not in results and matchfn(f)]
1149 visit.sort()
1148 visit.sort()
1150
1149
1151 if unknown:
1150 if unknown:
1152 # unknown == True means we walked all dirs under the roots
1151 # unknown == True means we walked all dirs under the roots
1153 # that wasn't ignored, and everything that matched was stat'ed
1152 # that wasn't ignored, and everything that matched was stat'ed
1154 # and is already in results.
1153 # and is already in results.
1155 # The rest must thus be ignored or under a symlink.
1154 # The rest must thus be ignored or under a symlink.
1156 audit_path = pathutil.pathauditor(self._root)
1155 audit_path = pathutil.pathauditor(self._root)
1157
1156
1158 for nf in iter(visit):
1157 for nf in iter(visit):
1159 # If a stat for the same file was already added with a
1158 # If a stat for the same file was already added with a
1160 # different case, don't add one for this, since that would
1159 # different case, don't add one for this, since that would
1161 # make it appear as if the file exists under both names
1160 # make it appear as if the file exists under both names
1162 # on disk.
1161 # on disk.
1163 if (normalizefile and
1162 if (normalizefile and
1164 normalizefile(nf, True, True) in results):
1163 normalizefile(nf, True, True) in results):
1165 results[nf] = None
1164 results[nf] = None
1166 # Report ignored items in the dmap as long as they are not
1165 # Report ignored items in the dmap as long as they are not
1167 # under a symlink directory.
1166 # under a symlink directory.
1168 elif audit_path.check(nf):
1167 elif audit_path.check(nf):
1169 try:
1168 try:
1170 results[nf] = lstat(join(nf))
1169 results[nf] = lstat(join(nf))
1171 # file was just ignored, no links, and exists
1170 # file was just ignored, no links, and exists
1172 except OSError:
1171 except OSError:
1173 # file doesn't exist
1172 # file doesn't exist
1174 results[nf] = None
1173 results[nf] = None
1175 else:
1174 else:
1176 # It's either missing or under a symlink directory
1175 # It's either missing or under a symlink directory
1177 # which we in this case report as missing
1176 # which we in this case report as missing
1178 results[nf] = None
1177 results[nf] = None
1179 else:
1178 else:
1180 # We may not have walked the full directory tree above,
1179 # We may not have walked the full directory tree above,
1181 # so stat and check everything we missed.
1180 # so stat and check everything we missed.
1182 iv = iter(visit)
1181 iv = iter(visit)
1183 for st in util.statfiles([join(i) for i in visit]):
1182 for st in util.statfiles([join(i) for i in visit]):
1184 results[next(iv)] = st
1183 results[next(iv)] = st
1185 return results
1184 return results
1186
1185
1187 def status(self, match, subrepos, ignored, clean, unknown):
1186 def status(self, match, subrepos, ignored, clean, unknown):
1188 '''Determine the status of the working copy relative to the
1187 '''Determine the status of the working copy relative to the
1189 dirstate and return a pair of (unsure, status), where status is of type
1188 dirstate and return a pair of (unsure, status), where status is of type
1190 scmutil.status and:
1189 scmutil.status and:
1191
1190
1192 unsure:
1191 unsure:
1193 files that might have been modified since the dirstate was
1192 files that might have been modified since the dirstate was
1194 written, but need to be read to be sure (size is the same
1193 written, but need to be read to be sure (size is the same
1195 but mtime differs)
1194 but mtime differs)
1196 status.modified:
1195 status.modified:
1197 files that have definitely been modified since the dirstate
1196 files that have definitely been modified since the dirstate
1198 was written (different size or mode)
1197 was written (different size or mode)
1199 status.clean:
1198 status.clean:
1200 files that have definitely not been modified since the
1199 files that have definitely not been modified since the
1201 dirstate was written
1200 dirstate was written
1202 '''
1201 '''
1203 listignored, listclean, listunknown = ignored, clean, unknown
1202 listignored, listclean, listunknown = ignored, clean, unknown
1204 lookup, modified, added, unknown, ignored = [], [], [], [], []
1203 lookup, modified, added, unknown, ignored = [], [], [], [], []
1205 removed, deleted, clean = [], [], []
1204 removed, deleted, clean = [], [], []
1206
1205
1207 dmap = self._map
1206 dmap = self._map
1208 ladd = lookup.append # aka "unsure"
1207 ladd = lookup.append # aka "unsure"
1209 madd = modified.append
1208 madd = modified.append
1210 aadd = added.append
1209 aadd = added.append
1211 uadd = unknown.append
1210 uadd = unknown.append
1212 iadd = ignored.append
1211 iadd = ignored.append
1213 radd = removed.append
1212 radd = removed.append
1214 dadd = deleted.append
1213 dadd = deleted.append
1215 cadd = clean.append
1214 cadd = clean.append
1216 mexact = match.exact
1215 mexact = match.exact
1217 dirignore = self._dirignore
1216 dirignore = self._dirignore
1218 checkexec = self._checkexec
1217 checkexec = self._checkexec
1219 copymap = self._copymap
1218 copymap = self._copymap
1220 lastnormaltime = self._lastnormaltime
1219 lastnormaltime = self._lastnormaltime
1221
1220
1222 # We need to do full walks when either
1221 # We need to do full walks when either
1223 # - we're listing all clean files, or
1222 # - we're listing all clean files, or
1224 # - match.traversedir does something, because match.traversedir should
1223 # - match.traversedir does something, because match.traversedir should
1225 # be called for every dir in the working dir
1224 # be called for every dir in the working dir
1226 full = listclean or match.traversedir is not None
1225 full = listclean or match.traversedir is not None
1227 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1226 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1228 full=full).iteritems():
1227 full=full).iteritems():
1229 if fn not in dmap:
1228 if fn not in dmap:
1230 if (listignored or mexact(fn)) and dirignore(fn):
1229 if (listignored or mexact(fn)) and dirignore(fn):
1231 if listignored:
1230 if listignored:
1232 iadd(fn)
1231 iadd(fn)
1233 else:
1232 else:
1234 uadd(fn)
1233 uadd(fn)
1235 continue
1234 continue
1236
1235
1237 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1236 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1238 # written like that for performance reasons. dmap[fn] is not a
1237 # written like that for performance reasons. dmap[fn] is not a
1239 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1238 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1240 # opcode has fast paths when the value to be unpacked is a tuple or
1239 # opcode has fast paths when the value to be unpacked is a tuple or
1241 # a list, but falls back to creating a full-fledged iterator in
1240 # a list, but falls back to creating a full-fledged iterator in
1242 # general. That is much slower than simply accessing and storing the
1241 # general. That is much slower than simply accessing and storing the
1243 # tuple members one by one.
1242 # tuple members one by one.
1244 t = dmap[fn]
1243 t = dmap[fn]
1245 state = t[0]
1244 state = t[0]
1246 mode = t[1]
1245 mode = t[1]
1247 size = t[2]
1246 size = t[2]
1248 time = t[3]
1247 time = t[3]
1249
1248
1250 if not st and state in "nma":
1249 if not st and state in "nma":
1251 dadd(fn)
1250 dadd(fn)
1252 elif state == 'n':
1251 elif state == 'n':
1253 if (size >= 0 and
1252 if (size >= 0 and
1254 ((size != st.st_size and size != st.st_size & _rangemask)
1253 ((size != st.st_size and size != st.st_size & _rangemask)
1255 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1254 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1256 or size == -2 # other parent
1255 or size == -2 # other parent
1257 or fn in copymap):
1256 or fn in copymap):
1258 madd(fn)
1257 madd(fn)
1259 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1258 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1260 ladd(fn)
1259 ladd(fn)
1261 elif st.st_mtime == lastnormaltime:
1260 elif st.st_mtime == lastnormaltime:
1262 # fn may have just been marked as normal and it may have
1261 # fn may have just been marked as normal and it may have
1263 # changed in the same second without changing its size.
1262 # changed in the same second without changing its size.
1264 # This can happen if we quickly do multiple commits.
1263 # This can happen if we quickly do multiple commits.
1265 # Force lookup, so we don't miss such a racy file change.
1264 # Force lookup, so we don't miss such a racy file change.
1266 ladd(fn)
1265 ladd(fn)
1267 elif listclean:
1266 elif listclean:
1268 cadd(fn)
1267 cadd(fn)
1269 elif state == 'm':
1268 elif state == 'm':
1270 madd(fn)
1269 madd(fn)
1271 elif state == 'a':
1270 elif state == 'a':
1272 aadd(fn)
1271 aadd(fn)
1273 elif state == 'r':
1272 elif state == 'r':
1274 radd(fn)
1273 radd(fn)
1275
1274
1276 return (lookup, scmutil.status(modified, added, removed, deleted,
1275 return (lookup, scmutil.status(modified, added, removed, deleted,
1277 unknown, ignored, clean))
1276 unknown, ignored, clean))
1278
1277
1279 def matches(self, match):
1278 def matches(self, match):
1280 '''
1279 '''
1281 return files in the dirstate (in whatever state) filtered by match
1280 return files in the dirstate (in whatever state) filtered by match
1282 '''
1281 '''
1283 dmap = self._map
1282 dmap = self._map
1284 if match.always():
1283 if match.always():
1285 return dmap.keys()
1284 return dmap.keys()
1286 files = match.files()
1285 files = match.files()
1287 if match.isexact():
1286 if match.isexact():
1288 # fast path -- filter the other way around, since typically files is
1287 # fast path -- filter the other way around, since typically files is
1289 # much smaller than dmap
1288 # much smaller than dmap
1290 return [f for f in files if f in dmap]
1289 return [f for f in files if f in dmap]
1291 if match.prefix() and all(fn in dmap for fn in files):
1290 if match.prefix() and all(fn in dmap for fn in files):
1292 # fast path -- all the values are known to be files, so just return
1291 # fast path -- all the values are known to be files, so just return
1293 # that
1292 # that
1294 return list(files)
1293 return list(files)
1295 return [f for f in dmap if match(f)]
1294 return [f for f in dmap if match(f)]
1296
1295
1297 def _actualfilename(self, tr):
1296 def _actualfilename(self, tr):
1298 if tr:
1297 if tr:
1299 return self._pendingfilename
1298 return self._pendingfilename
1300 else:
1299 else:
1301 return self._filename
1300 return self._filename
1302
1301
1303 def savebackup(self, tr, backupname):
1302 def savebackup(self, tr, backupname):
1304 '''Save current dirstate into backup file'''
1303 '''Save current dirstate into backup file'''
1305 filename = self._actualfilename(tr)
1304 filename = self._actualfilename(tr)
1306 assert backupname != filename
1305 assert backupname != filename
1307
1306
1308 # use '_writedirstate' instead of 'write' to write changes certainly,
1307 # use '_writedirstate' instead of 'write' to write changes certainly,
1309 # because the latter omits writing out if transaction is running.
1308 # because the latter omits writing out if transaction is running.
1310 # output file will be used to create backup of dirstate at this point.
1309 # output file will be used to create backup of dirstate at this point.
1311 if self._dirty or not self._opener.exists(filename):
1310 if self._dirty or not self._opener.exists(filename):
1312 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1311 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1313 checkambig=True))
1312 checkambig=True))
1314
1313
1315 if tr:
1314 if tr:
1316 # ensure that subsequent tr.writepending returns True for
1315 # ensure that subsequent tr.writepending returns True for
1317 # changes written out above, even if dirstate is never
1316 # changes written out above, even if dirstate is never
1318 # changed after this
1317 # changed after this
1319 tr.addfilegenerator('dirstate', (self._filename,),
1318 tr.addfilegenerator('dirstate', (self._filename,),
1320 self._writedirstate, location='plain')
1319 self._writedirstate, location='plain')
1321
1320
1322 # ensure that pending file written above is unlinked at
1321 # ensure that pending file written above is unlinked at
1323 # failure, even if tr.writepending isn't invoked until the
1322 # failure, even if tr.writepending isn't invoked until the
1324 # end of this transaction
1323 # end of this transaction
1325 tr.registertmp(filename, location='plain')
1324 tr.registertmp(filename, location='plain')
1326
1325
1327 self._opener.tryunlink(backupname)
1326 self._opener.tryunlink(backupname)
1328 # hardlink backup is okay because _writedirstate is always called
1327 # hardlink backup is okay because _writedirstate is always called
1329 # with an "atomictemp=True" file.
1328 # with an "atomictemp=True" file.
1330 util.copyfile(self._opener.join(filename),
1329 util.copyfile(self._opener.join(filename),
1331 self._opener.join(backupname), hardlink=True)
1330 self._opener.join(backupname), hardlink=True)
1332
1331
1333 def restorebackup(self, tr, backupname):
1332 def restorebackup(self, tr, backupname):
1334 '''Restore dirstate by backup file'''
1333 '''Restore dirstate by backup file'''
1335 # this "invalidate()" prevents "wlock.release()" from writing
1334 # this "invalidate()" prevents "wlock.release()" from writing
1336 # changes of dirstate out after restoring from backup file
1335 # changes of dirstate out after restoring from backup file
1337 self.invalidate()
1336 self.invalidate()
1338 filename = self._actualfilename(tr)
1337 filename = self._actualfilename(tr)
1339 self._opener.rename(backupname, filename, checkambig=True)
1338 self._opener.rename(backupname, filename, checkambig=True)
1340
1339
1341 def clearbackup(self, tr, backupname):
1340 def clearbackup(self, tr, backupname):
1342 '''Clear backup file'''
1341 '''Clear backup file'''
1343 self._opener.unlink(backupname)
1342 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now