dirstate: perform transactions with _copymap using single call, where possible...
Michael Bolin
r33983:5cb0a8fe default
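The visible hunks in this diff all apply the same idiom swap: two-step, membership-guarded operations on the copy map are collapsed into a single dict call (`get()` for reads, `pop(key, None)` for removals). The sketch below is illustrative only; a plain dict stands in for `dirstate._copymap`, and the keys and values are invented for the example.

# Illustrative sketch -- not part of the diff. A plain dict stands in
# for dirstate._copymap; the file names are made up.
copymap = {'renamed.py': 'original.py'}
f = 'renamed.py'

# Before: membership test followed by a separate lookup or deletion.
if f in copymap:
    source_old = copymap[f]     # second lookup just to read the value
if f in copymap:
    del copymap[f]              # second lookup just to delete the entry

# After: one call does the lookup, and pop() also removes the entry.
copymap = {'renamed.py': 'original.py'}
source_new = copymap.get(f)     # None if f is not a copy destination
removed = copymap.pop(f, None)  # deletes if present, never raises KeyError

Because `pop()` returns the removed value, the new code in `copy()` can fold the membership test into the call itself (`elif self._copymap.pop(dest, None):`), and `setparents()` now reads the copy source once with `self._copymap.get(f)` instead of testing membership and then indexing separately.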
@@ -1,1342 +1,1337 @@
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18 from . import (
18 from . import (
19 encoding,
19 encoding,
20 error,
20 error,
21 match as matchmod,
21 match as matchmod,
22 pathutil,
22 pathutil,
23 policy,
23 policy,
24 pycompat,
24 pycompat,
25 scmutil,
25 scmutil,
26 txnutil,
26 txnutil,
27 util,
27 util,
28 )
28 )
29
29
30 parsers = policy.importmod(r'parsers')
30 parsers = policy.importmod(r'parsers')
31
31
32 propertycache = util.propertycache
32 propertycache = util.propertycache
33 filecache = scmutil.filecache
33 filecache = scmutil.filecache
34 _rangemask = 0x7fffffff
34 _rangemask = 0x7fffffff
35
35
36 dirstatetuple = parsers.dirstatetuple
36 dirstatetuple = parsers.dirstatetuple
37
37
38 class repocache(filecache):
38 class repocache(filecache):
39 """filecache for files in .hg/"""
39 """filecache for files in .hg/"""
40 def join(self, obj, fname):
40 def join(self, obj, fname):
41 return obj._opener.join(fname)
41 return obj._opener.join(fname)
42
42
43 class rootcache(filecache):
43 class rootcache(filecache):
44 """filecache for files in the repository root"""
44 """filecache for files in the repository root"""
45 def join(self, obj, fname):
45 def join(self, obj, fname):
46 return obj._join(fname)
46 return obj._join(fname)
47
47
48 def _getfsnow(vfs):
48 def _getfsnow(vfs):
49 '''Get "now" timestamp on filesystem'''
49 '''Get "now" timestamp on filesystem'''
50 tmpfd, tmpname = vfs.mkstemp()
50 tmpfd, tmpname = vfs.mkstemp()
51 try:
51 try:
52 return os.fstat(tmpfd).st_mtime
52 return os.fstat(tmpfd).st_mtime
53 finally:
53 finally:
54 os.close(tmpfd)
54 os.close(tmpfd)
55 vfs.unlink(tmpname)
55 vfs.unlink(tmpname)
56
56
57 def nonnormalentries(dmap):
57 def nonnormalentries(dmap):
58 '''Compute the nonnormal dirstate entries from the dmap'''
58 '''Compute the nonnormal dirstate entries from the dmap'''
59 try:
59 try:
60 return parsers.nonnormalotherparententries(dmap)
60 return parsers.nonnormalotherparententries(dmap)
61 except AttributeError:
61 except AttributeError:
62 nonnorm = set()
62 nonnorm = set()
63 otherparent = set()
63 otherparent = set()
64 for fname, e in dmap.iteritems():
64 for fname, e in dmap.iteritems():
65 if e[0] != 'n' or e[3] == -1:
65 if e[0] != 'n' or e[3] == -1:
66 nonnorm.add(fname)
66 nonnorm.add(fname)
67 if e[0] == 'n' and e[2] == -2:
67 if e[0] == 'n' and e[2] == -2:
68 otherparent.add(fname)
68 otherparent.add(fname)
69 return nonnorm, otherparent
69 return nonnorm, otherparent
70
70
71 class dirstate(object):
71 class dirstate(object):
72
72
73 def __init__(self, opener, ui, root, validate, sparsematchfn):
73 def __init__(self, opener, ui, root, validate, sparsematchfn):
74 '''Create a new dirstate object.
74 '''Create a new dirstate object.
75
75
76 opener is an open()-like callable that can be used to open the
76 opener is an open()-like callable that can be used to open the
77 dirstate file; root is the root of the directory tracked by
77 dirstate file; root is the root of the directory tracked by
78 the dirstate.
78 the dirstate.
79 '''
79 '''
80 self._opener = opener
80 self._opener = opener
81 self._validate = validate
81 self._validate = validate
82 self._root = root
82 self._root = root
83 self._sparsematchfn = sparsematchfn
83 self._sparsematchfn = sparsematchfn
84 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
84 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
85 # UNC path pointing to root share (issue4557)
85 # UNC path pointing to root share (issue4557)
86 self._rootdir = pathutil.normasprefix(root)
86 self._rootdir = pathutil.normasprefix(root)
87 self._dirty = False
87 self._dirty = False
88 self._dirtypl = False
88 self._dirtypl = False
89 self._lastnormaltime = 0
89 self._lastnormaltime = 0
90 self._ui = ui
90 self._ui = ui
91 self._filecache = {}
91 self._filecache = {}
92 self._parentwriters = 0
92 self._parentwriters = 0
93 self._filename = 'dirstate'
93 self._filename = 'dirstate'
94 self._pendingfilename = '%s.pending' % self._filename
94 self._pendingfilename = '%s.pending' % self._filename
95 self._plchangecallbacks = {}
95 self._plchangecallbacks = {}
96 self._origpl = None
96 self._origpl = None
97 self._updatedfiles = set()
97 self._updatedfiles = set()
98
98
99 # for consistent view between _pl() and _read() invocations
99 # for consistent view between _pl() and _read() invocations
100 self._pendingmode = None
100 self._pendingmode = None
101
101
102 @contextlib.contextmanager
102 @contextlib.contextmanager
103 def parentchange(self):
103 def parentchange(self):
104 '''Context manager for handling dirstate parents.
104 '''Context manager for handling dirstate parents.
105
105
106 If an exception occurs in the scope of the context manager,
106 If an exception occurs in the scope of the context manager,
107 the incoherent dirstate won't be written when wlock is
107 the incoherent dirstate won't be written when wlock is
108 released.
108 released.
109 '''
109 '''
110 self._parentwriters += 1
110 self._parentwriters += 1
111 yield
111 yield
112 # Typically we want the "undo" step of a context manager in a
112 # Typically we want the "undo" step of a context manager in a
113 # finally block so it happens even when an exception
113 # finally block so it happens even when an exception
114 # occurs. In this case, however, we only want to decrement
114 # occurs. In this case, however, we only want to decrement
115 # parentwriters if the code in the with statement exits
115 # parentwriters if the code in the with statement exits
116 # normally, so we don't have a try/finally here on purpose.
116 # normally, so we don't have a try/finally here on purpose.
117 self._parentwriters -= 1
117 self._parentwriters -= 1
118
118
119 def beginparentchange(self):
119 def beginparentchange(self):
120 '''Marks the beginning of a set of changes that involve changing
120 '''Marks the beginning of a set of changes that involve changing
121 the dirstate parents. If there is an exception during this time,
121 the dirstate parents. If there is an exception during this time,
122 the dirstate will not be written when the wlock is released. This
122 the dirstate will not be written when the wlock is released. This
123 prevents writing an incoherent dirstate where the parent doesn't
123 prevents writing an incoherent dirstate where the parent doesn't
124 match the contents.
124 match the contents.
125 '''
125 '''
126 self._ui.deprecwarn('beginparentchange is obsoleted by the '
126 self._ui.deprecwarn('beginparentchange is obsoleted by the '
127 'parentchange context manager.', '4.3')
127 'parentchange context manager.', '4.3')
128 self._parentwriters += 1
128 self._parentwriters += 1
129
129
130 def endparentchange(self):
130 def endparentchange(self):
131 '''Marks the end of a set of changes that involve changing the
131 '''Marks the end of a set of changes that involve changing the
132 dirstate parents. Once all parent changes have been marked done,
132 dirstate parents. Once all parent changes have been marked done,
133 the wlock will be free to write the dirstate on release.
133 the wlock will be free to write the dirstate on release.
134 '''
134 '''
135 self._ui.deprecwarn('endparentchange is obsoleted by the '
135 self._ui.deprecwarn('endparentchange is obsoleted by the '
136 'parentchange context manager.', '4.3')
136 'parentchange context manager.', '4.3')
137 if self._parentwriters > 0:
137 if self._parentwriters > 0:
138 self._parentwriters -= 1
138 self._parentwriters -= 1
139
139
140 def pendingparentchange(self):
140 def pendingparentchange(self):
141 '''Returns true if the dirstate is in the middle of a set of changes
141 '''Returns true if the dirstate is in the middle of a set of changes
142 that modify the dirstate parent.
142 that modify the dirstate parent.
143 '''
143 '''
144 return self._parentwriters > 0
144 return self._parentwriters > 0
145
145
146 @propertycache
146 @propertycache
147 def _map(self):
147 def _map(self):
148 '''Return the dirstate contents as a map from filename to
148 '''Return the dirstate contents as a map from filename to
149 (state, mode, size, time).'''
149 (state, mode, size, time).'''
150 self._read()
150 self._read()
151 return self._map
151 return self._map
152
152
153 @propertycache
153 @propertycache
154 def _copymap(self):
154 def _copymap(self):
155 self._read()
155 self._read()
156 return self._copymap
156 return self._copymap
157
157
158 @propertycache
158 @propertycache
159 def _identity(self):
159 def _identity(self):
160 self._read()
160 self._read()
161 return self._identity
161 return self._identity
162
162
163 @propertycache
163 @propertycache
164 def _nonnormalset(self):
164 def _nonnormalset(self):
165 nonnorm, otherparents = nonnormalentries(self._map)
165 nonnorm, otherparents = nonnormalentries(self._map)
166 self._otherparentset = otherparents
166 self._otherparentset = otherparents
167 return nonnorm
167 return nonnorm
168
168
169 @propertycache
169 @propertycache
170 def _otherparentset(self):
170 def _otherparentset(self):
171 nonnorm, otherparents = nonnormalentries(self._map)
171 nonnorm, otherparents = nonnormalentries(self._map)
172 self._nonnormalset = nonnorm
172 self._nonnormalset = nonnorm
173 return otherparents
173 return otherparents
174
174
175 @propertycache
175 @propertycache
176 def _filefoldmap(self):
176 def _filefoldmap(self):
177 try:
177 try:
178 makefilefoldmap = parsers.make_file_foldmap
178 makefilefoldmap = parsers.make_file_foldmap
179 except AttributeError:
179 except AttributeError:
180 pass
180 pass
181 else:
181 else:
182 return makefilefoldmap(self._map, util.normcasespec,
182 return makefilefoldmap(self._map, util.normcasespec,
183 util.normcasefallback)
183 util.normcasefallback)
184
184
185 f = {}
185 f = {}
186 normcase = util.normcase
186 normcase = util.normcase
187 for name, s in self._map.iteritems():
187 for name, s in self._map.iteritems():
188 if s[0] != 'r':
188 if s[0] != 'r':
189 f[normcase(name)] = name
189 f[normcase(name)] = name
190 f['.'] = '.' # prevents useless util.fspath() invocation
190 f['.'] = '.' # prevents useless util.fspath() invocation
191 return f
191 return f
192
192
193 @propertycache
193 @propertycache
194 def _dirfoldmap(self):
194 def _dirfoldmap(self):
195 f = {}
195 f = {}
196 normcase = util.normcase
196 normcase = util.normcase
197 for name in self._dirs:
197 for name in self._dirs:
198 f[normcase(name)] = name
198 f[normcase(name)] = name
199 return f
199 return f
200
200
201 @property
201 @property
202 def _sparsematcher(self):
202 def _sparsematcher(self):
203 """The matcher for the sparse checkout.
203 """The matcher for the sparse checkout.
204
204
205 The working directory may not include every file from a manifest. The
205 The working directory may not include every file from a manifest. The
206 matcher obtained by this property will match a path if it is to be
206 matcher obtained by this property will match a path if it is to be
207 included in the working directory.
207 included in the working directory.
208 """
208 """
209 # TODO there is potential to cache this property. For now, the matcher
209 # TODO there is potential to cache this property. For now, the matcher
210 # is resolved on every access. (But the called function does use a
210 # is resolved on every access. (But the called function does use a
211 # cache to keep the lookup fast.)
211 # cache to keep the lookup fast.)
212 return self._sparsematchfn()
212 return self._sparsematchfn()
213
213
214 @repocache('branch')
214 @repocache('branch')
215 def _branch(self):
215 def _branch(self):
216 try:
216 try:
217 return self._opener.read("branch").strip() or "default"
217 return self._opener.read("branch").strip() or "default"
218 except IOError as inst:
218 except IOError as inst:
219 if inst.errno != errno.ENOENT:
219 if inst.errno != errno.ENOENT:
220 raise
220 raise
221 return "default"
221 return "default"
222
222
223 @propertycache
223 @propertycache
224 def _pl(self):
224 def _pl(self):
225 try:
225 try:
226 fp = self._opendirstatefile()
226 fp = self._opendirstatefile()
227 st = fp.read(40)
227 st = fp.read(40)
228 fp.close()
228 fp.close()
229 l = len(st)
229 l = len(st)
230 if l == 40:
230 if l == 40:
231 return st[:20], st[20:40]
231 return st[:20], st[20:40]
232 elif l > 0 and l < 40:
232 elif l > 0 and l < 40:
233 raise error.Abort(_('working directory state appears damaged!'))
233 raise error.Abort(_('working directory state appears damaged!'))
234 except IOError as err:
234 except IOError as err:
235 if err.errno != errno.ENOENT:
235 if err.errno != errno.ENOENT:
236 raise
236 raise
237 return [nullid, nullid]
237 return [nullid, nullid]
238
238
239 @propertycache
239 @propertycache
240 def _dirs(self):
240 def _dirs(self):
241 return util.dirs(self._map, 'r')
241 return util.dirs(self._map, 'r')
242
242
243 def dirs(self):
243 def dirs(self):
244 return self._dirs
244 return self._dirs
245
245
246 @rootcache('.hgignore')
246 @rootcache('.hgignore')
247 def _ignore(self):
247 def _ignore(self):
248 files = self._ignorefiles()
248 files = self._ignorefiles()
249 if not files:
249 if not files:
250 return matchmod.never(self._root, '')
250 return matchmod.never(self._root, '')
251
251
252 pats = ['include:%s' % f for f in files]
252 pats = ['include:%s' % f for f in files]
253 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
253 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
254
254
255 @propertycache
255 @propertycache
256 def _slash(self):
256 def _slash(self):
257 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
257 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
258
258
259 @propertycache
259 @propertycache
260 def _checklink(self):
260 def _checklink(self):
261 return util.checklink(self._root)
261 return util.checklink(self._root)
262
262
263 @propertycache
263 @propertycache
264 def _checkexec(self):
264 def _checkexec(self):
265 return util.checkexec(self._root)
265 return util.checkexec(self._root)
266
266
267 @propertycache
267 @propertycache
268 def _checkcase(self):
268 def _checkcase(self):
269 return not util.fscasesensitive(self._join('.hg'))
269 return not util.fscasesensitive(self._join('.hg'))
270
270
271 def _join(self, f):
271 def _join(self, f):
272 # much faster than os.path.join()
272 # much faster than os.path.join()
273 # it's safe because f is always a relative path
273 # it's safe because f is always a relative path
274 return self._rootdir + f
274 return self._rootdir + f
275
275
276 def flagfunc(self, buildfallback):
276 def flagfunc(self, buildfallback):
277 if self._checklink and self._checkexec:
277 if self._checklink and self._checkexec:
278 def f(x):
278 def f(x):
279 try:
279 try:
280 st = os.lstat(self._join(x))
280 st = os.lstat(self._join(x))
281 if util.statislink(st):
281 if util.statislink(st):
282 return 'l'
282 return 'l'
283 if util.statisexec(st):
283 if util.statisexec(st):
284 return 'x'
284 return 'x'
285 except OSError:
285 except OSError:
286 pass
286 pass
287 return ''
287 return ''
288 return f
288 return f
289
289
290 fallback = buildfallback()
290 fallback = buildfallback()
291 if self._checklink:
291 if self._checklink:
292 def f(x):
292 def f(x):
293 if os.path.islink(self._join(x)):
293 if os.path.islink(self._join(x)):
294 return 'l'
294 return 'l'
295 if 'x' in fallback(x):
295 if 'x' in fallback(x):
296 return 'x'
296 return 'x'
297 return ''
297 return ''
298 return f
298 return f
299 if self._checkexec:
299 if self._checkexec:
300 def f(x):
300 def f(x):
301 if 'l' in fallback(x):
301 if 'l' in fallback(x):
302 return 'l'
302 return 'l'
303 if util.isexec(self._join(x)):
303 if util.isexec(self._join(x)):
304 return 'x'
304 return 'x'
305 return ''
305 return ''
306 return f
306 return f
307 else:
307 else:
308 return fallback
308 return fallback
309
309
310 @propertycache
310 @propertycache
311 def _cwd(self):
311 def _cwd(self):
312 # internal config: ui.forcecwd
312 # internal config: ui.forcecwd
313 forcecwd = self._ui.config('ui', 'forcecwd')
313 forcecwd = self._ui.config('ui', 'forcecwd')
314 if forcecwd:
314 if forcecwd:
315 return forcecwd
315 return forcecwd
316 return pycompat.getcwd()
316 return pycompat.getcwd()
317
317
318 def getcwd(self):
318 def getcwd(self):
319 '''Return the path from which a canonical path is calculated.
319 '''Return the path from which a canonical path is calculated.
320
320
321 This path should be used to resolve file patterns or to convert
321 This path should be used to resolve file patterns or to convert
322 canonical paths back to file paths for display. It shouldn't be
322 canonical paths back to file paths for display. It shouldn't be
323 used to get real file paths. Use vfs functions instead.
323 used to get real file paths. Use vfs functions instead.
324 '''
324 '''
325 cwd = self._cwd
325 cwd = self._cwd
326 if cwd == self._root:
326 if cwd == self._root:
327 return ''
327 return ''
328 # self._root ends with a path separator if self._root is '/' or 'C:\'
328 # self._root ends with a path separator if self._root is '/' or 'C:\'
329 rootsep = self._root
329 rootsep = self._root
330 if not util.endswithsep(rootsep):
330 if not util.endswithsep(rootsep):
331 rootsep += pycompat.ossep
331 rootsep += pycompat.ossep
332 if cwd.startswith(rootsep):
332 if cwd.startswith(rootsep):
333 return cwd[len(rootsep):]
333 return cwd[len(rootsep):]
334 else:
334 else:
335 # we're outside the repo. return an absolute path.
335 # we're outside the repo. return an absolute path.
336 return cwd
336 return cwd
337
337
338 def pathto(self, f, cwd=None):
338 def pathto(self, f, cwd=None):
339 if cwd is None:
339 if cwd is None:
340 cwd = self.getcwd()
340 cwd = self.getcwd()
341 path = util.pathto(self._root, cwd, f)
341 path = util.pathto(self._root, cwd, f)
342 if self._slash:
342 if self._slash:
343 return util.pconvert(path)
343 return util.pconvert(path)
344 return path
344 return path
345
345
346 def __getitem__(self, key):
346 def __getitem__(self, key):
347 '''Return the current state of key (a filename) in the dirstate.
347 '''Return the current state of key (a filename) in the dirstate.
348
348
349 States are:
349 States are:
350 n normal
350 n normal
351 m needs merging
351 m needs merging
352 r marked for removal
352 r marked for removal
353 a marked for addition
353 a marked for addition
354 ? not tracked
354 ? not tracked
355 '''
355 '''
356 return self._map.get(key, ("?",))[0]
356 return self._map.get(key, ("?",))[0]
357
357
358 def __contains__(self, key):
358 def __contains__(self, key):
359 return key in self._map
359 return key in self._map
360
360
361 def __iter__(self):
361 def __iter__(self):
362 return iter(sorted(self._map))
362 return iter(sorted(self._map))
363
363
364 def items(self):
364 def items(self):
365 return self._map.iteritems()
365 return self._map.iteritems()
366
366
367 iteritems = items
367 iteritems = items
368
368
369 def parents(self):
369 def parents(self):
370 return [self._validate(p) for p in self._pl]
370 return [self._validate(p) for p in self._pl]
371
371
372 def p1(self):
372 def p1(self):
373 return self._validate(self._pl[0])
373 return self._validate(self._pl[0])
374
374
375 def p2(self):
375 def p2(self):
376 return self._validate(self._pl[1])
376 return self._validate(self._pl[1])
377
377
378 def branch(self):
378 def branch(self):
379 return encoding.tolocal(self._branch)
379 return encoding.tolocal(self._branch)
380
380
381 def setparents(self, p1, p2=nullid):
381 def setparents(self, p1, p2=nullid):
382 """Set dirstate parents to p1 and p2.
382 """Set dirstate parents to p1 and p2.
383
383
384 When moving from two parents to one, 'm' merged entries a
384 When moving from two parents to one, 'm' merged entries a
385 adjusted to normal and previous copy records discarded and
385 adjusted to normal and previous copy records discarded and
386 returned by the call.
386 returned by the call.
387
387
388 See localrepo.setparents()
388 See localrepo.setparents()
389 """
389 """
390 if self._parentwriters == 0:
390 if self._parentwriters == 0:
391 raise ValueError("cannot set dirstate parent without "
391 raise ValueError("cannot set dirstate parent without "
392 "calling dirstate.beginparentchange")
392 "calling dirstate.beginparentchange")
393
393
394 self._dirty = self._dirtypl = True
394 self._dirty = self._dirtypl = True
395 oldp2 = self._pl[1]
395 oldp2 = self._pl[1]
396 if self._origpl is None:
396 if self._origpl is None:
397 self._origpl = self._pl
397 self._origpl = self._pl
398 self._pl = p1, p2
398 self._pl = p1, p2
399 copies = {}
399 copies = {}
400 if oldp2 != nullid and p2 == nullid:
400 if oldp2 != nullid and p2 == nullid:
401 candidatefiles = self._nonnormalset.union(self._otherparentset)
401 candidatefiles = self._nonnormalset.union(self._otherparentset)
402 for f in candidatefiles:
402 for f in candidatefiles:
403 s = self._map.get(f)
403 s = self._map.get(f)
404 if s is None:
404 if s is None:
405 continue
405 continue
406
406
407 # Discard 'm' markers when moving away from a merge state
407 # Discard 'm' markers when moving away from a merge state
408 if s[0] == 'm':
408 if s[0] == 'm':
409 if f in self._copymap:
409 source = self._copymap.get(f)
410 copies[f] = self._copymap[f]
410 if source:
411 copies[f] = source
411 self.normallookup(f)
412 self.normallookup(f)
412 # Also fix up otherparent markers
413 # Also fix up otherparent markers
413 elif s[0] == 'n' and s[2] == -2:
414 elif s[0] == 'n' and s[2] == -2:
414 if f in self._copymap:
415 source = self._copymap.get(f)
415 copies[f] = self._copymap[f]
416 if source:
417 copies[f] = source
416 self.add(f)
418 self.add(f)
417 return copies
419 return copies
418
420
419 def setbranch(self, branch):
421 def setbranch(self, branch):
420 self._branch = encoding.fromlocal(branch)
422 self._branch = encoding.fromlocal(branch)
421 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
423 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
422 try:
424 try:
423 f.write(self._branch + '\n')
425 f.write(self._branch + '\n')
424 f.close()
426 f.close()
425
427
426 # make sure filecache has the correct stat info for _branch after
428 # make sure filecache has the correct stat info for _branch after
427 # replacing the underlying file
429 # replacing the underlying file
428 ce = self._filecache['_branch']
430 ce = self._filecache['_branch']
429 if ce:
431 if ce:
430 ce.refresh()
432 ce.refresh()
431 except: # re-raises
433 except: # re-raises
432 f.discard()
434 f.discard()
433 raise
435 raise
434
436
435 def _opendirstatefile(self):
437 def _opendirstatefile(self):
436 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
438 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
437 if self._pendingmode is not None and self._pendingmode != mode:
439 if self._pendingmode is not None and self._pendingmode != mode:
438 fp.close()
440 fp.close()
439 raise error.Abort(_('working directory state may be '
441 raise error.Abort(_('working directory state may be '
440 'changed parallelly'))
442 'changed parallelly'))
441 self._pendingmode = mode
443 self._pendingmode = mode
442 return fp
444 return fp
443
445
444 def _read(self):
446 def _read(self):
445 self._map = {}
447 self._map = {}
446 self._copymap = {}
448 self._copymap = {}
447 # ignore HG_PENDING because identity is used only for writing
449 # ignore HG_PENDING because identity is used only for writing
448 self._identity = util.filestat.frompath(
450 self._identity = util.filestat.frompath(
449 self._opener.join(self._filename))
451 self._opener.join(self._filename))
450 try:
452 try:
451 fp = self._opendirstatefile()
453 fp = self._opendirstatefile()
452 try:
454 try:
453 st = fp.read()
455 st = fp.read()
454 finally:
456 finally:
455 fp.close()
457 fp.close()
456 except IOError as err:
458 except IOError as err:
457 if err.errno != errno.ENOENT:
459 if err.errno != errno.ENOENT:
458 raise
460 raise
459 return
461 return
460 if not st:
462 if not st:
461 return
463 return
462
464
463 if util.safehasattr(parsers, 'dict_new_presized'):
465 if util.safehasattr(parsers, 'dict_new_presized'):
464 # Make an estimate of the number of files in the dirstate based on
466 # Make an estimate of the number of files in the dirstate based on
465 # its size. From a linear regression on a set of real-world repos,
467 # its size. From a linear regression on a set of real-world repos,
466 # all over 10,000 files, the size of a dirstate entry is 85
468 # all over 10,000 files, the size of a dirstate entry is 85
467 # bytes. The cost of resizing is significantly higher than the cost
469 # bytes. The cost of resizing is significantly higher than the cost
468 # of filling in a larger presized dict, so subtract 20% from the
470 # of filling in a larger presized dict, so subtract 20% from the
469 # size.
471 # size.
470 #
472 #
471 # This heuristic is imperfect in many ways, so in a future dirstate
473 # This heuristic is imperfect in many ways, so in a future dirstate
472 # format update it makes sense to just record the number of entries
474 # format update it makes sense to just record the number of entries
473 # on write.
475 # on write.
474 self._map = parsers.dict_new_presized(len(st) / 71)
476 self._map = parsers.dict_new_presized(len(st) / 71)
475
477
476 # Python's garbage collector triggers a GC each time a certain number
478 # Python's garbage collector triggers a GC each time a certain number
477 # of container objects (the number being defined by
479 # of container objects (the number being defined by
478 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
480 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
479 # for each file in the dirstate. The C version then immediately marks
481 # for each file in the dirstate. The C version then immediately marks
480 # them as not to be tracked by the collector. However, this has no
482 # them as not to be tracked by the collector. However, this has no
481 # effect on when GCs are triggered, only on what objects the GC looks
483 # effect on when GCs are triggered, only on what objects the GC looks
482 # into. This means that O(number of files) GCs are unavoidable.
484 # into. This means that O(number of files) GCs are unavoidable.
483 # Depending on when in the process's lifetime the dirstate is parsed,
485 # Depending on when in the process's lifetime the dirstate is parsed,
484 # this can get very expensive. As a workaround, disable GC while
486 # this can get very expensive. As a workaround, disable GC while
485 # parsing the dirstate.
487 # parsing the dirstate.
486 #
488 #
487 # (we cannot decorate the function directly since it is in a C module)
489 # (we cannot decorate the function directly since it is in a C module)
488 parse_dirstate = util.nogc(parsers.parse_dirstate)
490 parse_dirstate = util.nogc(parsers.parse_dirstate)
489 p = parse_dirstate(self._map, self._copymap, st)
491 p = parse_dirstate(self._map, self._copymap, st)
490 if not self._dirtypl:
492 if not self._dirtypl:
491 self._pl = p
493 self._pl = p
492
494
493 def invalidate(self):
495 def invalidate(self):
494 '''Causes the next access to reread the dirstate.
496 '''Causes the next access to reread the dirstate.
495
497
496 This is different from localrepo.invalidatedirstate() because it always
498 This is different from localrepo.invalidatedirstate() because it always
497 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
499 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
498 check whether the dirstate has changed before rereading it.'''
500 check whether the dirstate has changed before rereading it.'''
499
501
500 for a in ("_map", "_copymap", "_identity",
502 for a in ("_map", "_copymap", "_identity",
501 "_filefoldmap", "_dirfoldmap", "_branch",
503 "_filefoldmap", "_dirfoldmap", "_branch",
502 "_pl", "_dirs", "_ignore", "_nonnormalset",
504 "_pl", "_dirs", "_ignore", "_nonnormalset",
503 "_otherparentset"):
505 "_otherparentset"):
504 if a in self.__dict__:
506 if a in self.__dict__:
505 delattr(self, a)
507 delattr(self, a)
506 self._lastnormaltime = 0
508 self._lastnormaltime = 0
507 self._dirty = False
509 self._dirty = False
508 self._updatedfiles.clear()
510 self._updatedfiles.clear()
509 self._parentwriters = 0
511 self._parentwriters = 0
510 self._origpl = None
512 self._origpl = None
511
513
512 def copy(self, source, dest):
514 def copy(self, source, dest):
513 """Mark dest as a copy of source. Unmark dest if source is None."""
515 """Mark dest as a copy of source. Unmark dest if source is None."""
514 if source == dest:
516 if source == dest:
515 return
517 return
516 self._dirty = True
518 self._dirty = True
517 if source is not None:
519 if source is not None:
518 self._copymap[dest] = source
520 self._copymap[dest] = source
519 self._updatedfiles.add(source)
521 self._updatedfiles.add(source)
520 self._updatedfiles.add(dest)
522 self._updatedfiles.add(dest)
521 elif dest in self._copymap:
523 elif self._copymap.pop(dest, None):
522 del self._copymap[dest]
523 self._updatedfiles.add(dest)
524 self._updatedfiles.add(dest)
524
525
525 def copied(self, file):
526 def copied(self, file):
526 return self._copymap.get(file, None)
527 return self._copymap.get(file, None)
527
528
528 def copies(self):
529 def copies(self):
529 return self._copymap
530 return self._copymap
530
531
531 def _droppath(self, f):
532 def _droppath(self, f):
532 if self[f] not in "?r" and "_dirs" in self.__dict__:
533 if self[f] not in "?r" and "_dirs" in self.__dict__:
533 self._dirs.delpath(f)
534 self._dirs.delpath(f)
534
535
535 if "_filefoldmap" in self.__dict__:
536 if "_filefoldmap" in self.__dict__:
536 normed = util.normcase(f)
537 normed = util.normcase(f)
537 if normed in self._filefoldmap:
538 if normed in self._filefoldmap:
538 del self._filefoldmap[normed]
539 del self._filefoldmap[normed]
539
540
540 self._updatedfiles.add(f)
541 self._updatedfiles.add(f)
541
542
542 def _addpath(self, f, state, mode, size, mtime):
543 def _addpath(self, f, state, mode, size, mtime):
543 oldstate = self[f]
544 oldstate = self[f]
544 if state == 'a' or oldstate == 'r':
545 if state == 'a' or oldstate == 'r':
545 scmutil.checkfilename(f)
546 scmutil.checkfilename(f)
546 if f in self._dirs:
547 if f in self._dirs:
547 raise error.Abort(_('directory %r already in dirstate') % f)
548 raise error.Abort(_('directory %r already in dirstate') % f)
548 # shadows
549 # shadows
549 for d in util.finddirs(f):
550 for d in util.finddirs(f):
550 if d in self._dirs:
551 if d in self._dirs:
551 break
552 break
552 if d in self._map and self[d] != 'r':
553 if d in self._map and self[d] != 'r':
553 raise error.Abort(
554 raise error.Abort(
554 _('file %r in dirstate clashes with %r') % (d, f))
555 _('file %r in dirstate clashes with %r') % (d, f))
555 if oldstate in "?r" and "_dirs" in self.__dict__:
556 if oldstate in "?r" and "_dirs" in self.__dict__:
556 self._dirs.addpath(f)
557 self._dirs.addpath(f)
557 self._dirty = True
558 self._dirty = True
558 self._updatedfiles.add(f)
559 self._updatedfiles.add(f)
559 self._map[f] = dirstatetuple(state, mode, size, mtime)
560 self._map[f] = dirstatetuple(state, mode, size, mtime)
560 if state != 'n' or mtime == -1:
561 if state != 'n' or mtime == -1:
561 self._nonnormalset.add(f)
562 self._nonnormalset.add(f)
562 if size == -2:
563 if size == -2:
563 self._otherparentset.add(f)
564 self._otherparentset.add(f)
564
565
565 def normal(self, f):
566 def normal(self, f):
566 '''Mark a file normal and clean.'''
567 '''Mark a file normal and clean.'''
567 s = os.lstat(self._join(f))
568 s = os.lstat(self._join(f))
568 mtime = s.st_mtime
569 mtime = s.st_mtime
569 self._addpath(f, 'n', s.st_mode,
570 self._addpath(f, 'n', s.st_mode,
570 s.st_size & _rangemask, mtime & _rangemask)
571 s.st_size & _rangemask, mtime & _rangemask)
571 if f in self._copymap:
572 self._copymap.pop(f, None)
572 del self._copymap[f]
573 if f in self._nonnormalset:
573 if f in self._nonnormalset:
574 self._nonnormalset.remove(f)
574 self._nonnormalset.remove(f)
575 if mtime > self._lastnormaltime:
575 if mtime > self._lastnormaltime:
576 # Remember the most recent modification timeslot for status(),
576 # Remember the most recent modification timeslot for status(),
577 # to make sure we won't miss future size-preserving file content
577 # to make sure we won't miss future size-preserving file content
578 # modifications that happen within the same timeslot.
578 # modifications that happen within the same timeslot.
579 self._lastnormaltime = mtime
579 self._lastnormaltime = mtime
580
580
581 def normallookup(self, f):
581 def normallookup(self, f):
582 '''Mark a file normal, but possibly dirty.'''
582 '''Mark a file normal, but possibly dirty.'''
583 if self._pl[1] != nullid and f in self._map:
583 if self._pl[1] != nullid and f in self._map:
584 # if there is a merge going on and the file was either
584 # if there is a merge going on and the file was either
585 # in state 'm' (-1) or coming from other parent (-2) before
585 # in state 'm' (-1) or coming from other parent (-2) before
586 # being removed, restore that state.
586 # being removed, restore that state.
587 entry = self._map[f]
587 entry = self._map[f]
588 if entry[0] == 'r' and entry[2] in (-1, -2):
588 if entry[0] == 'r' and entry[2] in (-1, -2):
589 source = self._copymap.get(f)
589 source = self._copymap.get(f)
590 if entry[2] == -1:
590 if entry[2] == -1:
591 self.merge(f)
591 self.merge(f)
592 elif entry[2] == -2:
592 elif entry[2] == -2:
593 self.otherparent(f)
593 self.otherparent(f)
594 if source:
594 if source:
595 self.copy(source, f)
595 self.copy(source, f)
596 return
596 return
597 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
597 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
598 return
598 return
599 self._addpath(f, 'n', 0, -1, -1)
599 self._addpath(f, 'n', 0, -1, -1)
600 if f in self._copymap:
600 self._copymap.pop(f, None)
601 del self._copymap[f]
602 if f in self._nonnormalset:
601 if f in self._nonnormalset:
603 self._nonnormalset.remove(f)
602 self._nonnormalset.remove(f)
604
603
605 def otherparent(self, f):
604 def otherparent(self, f):
606 '''Mark as coming from the other parent, always dirty.'''
605 '''Mark as coming from the other parent, always dirty.'''
607 if self._pl[1] == nullid:
606 if self._pl[1] == nullid:
608 raise error.Abort(_("setting %r to other parent "
607 raise error.Abort(_("setting %r to other parent "
609 "only allowed in merges") % f)
608 "only allowed in merges") % f)
610 if f in self and self[f] == 'n':
609 if f in self and self[f] == 'n':
611 # merge-like
610 # merge-like
612 self._addpath(f, 'm', 0, -2, -1)
611 self._addpath(f, 'm', 0, -2, -1)
613 else:
612 else:
614 # add-like
613 # add-like
615 self._addpath(f, 'n', 0, -2, -1)
614 self._addpath(f, 'n', 0, -2, -1)
616
615 self._copymap.pop(f, None)
617 if f in self._copymap:
618 del self._copymap[f]
619
616
620 def add(self, f):
617 def add(self, f):
621 '''Mark a file added.'''
618 '''Mark a file added.'''
622 self._addpath(f, 'a', 0, -1, -1)
619 self._addpath(f, 'a', 0, -1, -1)
623 if f in self._copymap:
620 self._copymap.pop(f, None)
624 del self._copymap[f]
625
621
626 def remove(self, f):
622 def remove(self, f):
627 '''Mark a file removed.'''
623 '''Mark a file removed.'''
628 self._dirty = True
624 self._dirty = True
629 self._droppath(f)
625 self._droppath(f)
630 size = 0
626 size = 0
631 if self._pl[1] != nullid and f in self._map:
627 if self._pl[1] != nullid and f in self._map:
632 # backup the previous state
628 # backup the previous state
633 entry = self._map[f]
629 entry = self._map[f]
634 if entry[0] == 'm': # merge
630 if entry[0] == 'm': # merge
635 size = -1
631 size = -1
636 elif entry[0] == 'n' and entry[2] == -2: # other parent
632 elif entry[0] == 'n' and entry[2] == -2: # other parent
637 size = -2
633 size = -2
638 self._otherparentset.add(f)
634 self._otherparentset.add(f)
639 self._map[f] = dirstatetuple('r', 0, size, 0)
635 self._map[f] = dirstatetuple('r', 0, size, 0)
640 self._nonnormalset.add(f)
636 self._nonnormalset.add(f)
641 if size == 0 and f in self._copymap:
637 if size == 0:
642 del self._copymap[f]
638 self._copymap.pop(f, None)
643
639
644 def merge(self, f):
640 def merge(self, f):
645 '''Mark a file merged.'''
641 '''Mark a file merged.'''
646 if self._pl[1] == nullid:
642 if self._pl[1] == nullid:
647 return self.normallookup(f)
643 return self.normallookup(f)
648 return self.otherparent(f)
644 return self.otherparent(f)
649
645
650 def drop(self, f):
646 def drop(self, f):
651 '''Drop a file from the dirstate'''
647 '''Drop a file from the dirstate'''
652 if f in self._map:
648 if f in self._map:
653 self._dirty = True
649 self._dirty = True
654 self._droppath(f)
650 self._droppath(f)
655 del self._map[f]
651 del self._map[f]
656 if f in self._nonnormalset:
652 if f in self._nonnormalset:
657 self._nonnormalset.remove(f)
653 self._nonnormalset.remove(f)
658 if f in self._copymap:
654 self._copymap.pop(f, None)
659 del self._copymap[f]
660
655
661 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
656 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
662 if exists is None:
657 if exists is None:
663 exists = os.path.lexists(os.path.join(self._root, path))
658 exists = os.path.lexists(os.path.join(self._root, path))
664 if not exists:
659 if not exists:
665 # Maybe a path component exists
660 # Maybe a path component exists
666 if not ignoremissing and '/' in path:
661 if not ignoremissing and '/' in path:
667 d, f = path.rsplit('/', 1)
662 d, f = path.rsplit('/', 1)
668 d = self._normalize(d, False, ignoremissing, None)
663 d = self._normalize(d, False, ignoremissing, None)
669 folded = d + "/" + f
664 folded = d + "/" + f
670 else:
665 else:
671 # No path components, preserve original case
666 # No path components, preserve original case
672 folded = path
667 folded = path
673 else:
668 else:
674 # recursively normalize leading directory components
669 # recursively normalize leading directory components
675 # against dirstate
670 # against dirstate
676 if '/' in normed:
671 if '/' in normed:
677 d, f = normed.rsplit('/', 1)
672 d, f = normed.rsplit('/', 1)
678 d = self._normalize(d, False, ignoremissing, True)
673 d = self._normalize(d, False, ignoremissing, True)
679 r = self._root + "/" + d
674 r = self._root + "/" + d
680 folded = d + "/" + util.fspath(f, r)
675 folded = d + "/" + util.fspath(f, r)
681 else:
676 else:
682 folded = util.fspath(normed, self._root)
677 folded = util.fspath(normed, self._root)
683 storemap[normed] = folded
678 storemap[normed] = folded
684
679
685 return folded
680 return folded
686
681
687 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
682 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
688 normed = util.normcase(path)
683 normed = util.normcase(path)
689 folded = self._filefoldmap.get(normed, None)
684 folded = self._filefoldmap.get(normed, None)
690 if folded is None:
685 if folded is None:
691 if isknown:
686 if isknown:
692 folded = path
687 folded = path
693 else:
688 else:
694 folded = self._discoverpath(path, normed, ignoremissing, exists,
689 folded = self._discoverpath(path, normed, ignoremissing, exists,
695 self._filefoldmap)
690 self._filefoldmap)
696 return folded
691 return folded
697
692
698 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
693 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
699 normed = util.normcase(path)
694 normed = util.normcase(path)
700 folded = self._filefoldmap.get(normed, None)
695 folded = self._filefoldmap.get(normed, None)
701 if folded is None:
696 if folded is None:
702 folded = self._dirfoldmap.get(normed, None)
697 folded = self._dirfoldmap.get(normed, None)
703 if folded is None:
698 if folded is None:
704 if isknown:
699 if isknown:
705 folded = path
700 folded = path
706 else:
701 else:
707 # store discovered result in dirfoldmap so that future
702 # store discovered result in dirfoldmap so that future
708 # normalizefile calls don't start matching directories
703 # normalizefile calls don't start matching directories
709 folded = self._discoverpath(path, normed, ignoremissing, exists,
704 folded = self._discoverpath(path, normed, ignoremissing, exists,
710 self._dirfoldmap)
705 self._dirfoldmap)
711 return folded
706 return folded
712
707
713 def normalize(self, path, isknown=False, ignoremissing=False):
708 def normalize(self, path, isknown=False, ignoremissing=False):
714 '''
709 '''
715 normalize the case of a pathname when on a casefolding filesystem
710 normalize the case of a pathname when on a casefolding filesystem
716
711
717 isknown specifies whether the filename came from walking the
712 isknown specifies whether the filename came from walking the
718 disk, to avoid extra filesystem access.
713 disk, to avoid extra filesystem access.
719
714
720 If ignoremissing is True, missing path are returned
715 If ignoremissing is True, missing path are returned
721 unchanged. Otherwise, we try harder to normalize possibly
716 unchanged. Otherwise, we try harder to normalize possibly
722 existing path components.
717 existing path components.
723
718
724 The normalized case is determined based on the following precedence:
719 The normalized case is determined based on the following precedence:
725
720
726 - version of name already stored in the dirstate
721 - version of name already stored in the dirstate
727 - version of name stored on disk
722 - version of name stored on disk
728 - version provided via command arguments
723 - version provided via command arguments
729 '''
724 '''
730
725
731 if self._checkcase:
726 if self._checkcase:
732 return self._normalize(path, isknown, ignoremissing)
727 return self._normalize(path, isknown, ignoremissing)
733 return path
728 return path
734
729
735 def clear(self):
730 def clear(self):
736 self._map = {}
731 self._map = {}
737 self._nonnormalset = set()
732 self._nonnormalset = set()
738 self._otherparentset = set()
733 self._otherparentset = set()
739 if "_dirs" in self.__dict__:
734 if "_dirs" in self.__dict__:
740 delattr(self, "_dirs")
735 delattr(self, "_dirs")
741 self._copymap = {}
736 self._copymap = {}
742 self._pl = [nullid, nullid]
737 self._pl = [nullid, nullid]
743 self._lastnormaltime = 0
738 self._lastnormaltime = 0
744 self._updatedfiles.clear()
739 self._updatedfiles.clear()
745 self._dirty = True
740 self._dirty = True
746
741
747 def rebuild(self, parent, allfiles, changedfiles=None):
742 def rebuild(self, parent, allfiles, changedfiles=None):
748 if changedfiles is None:
743 if changedfiles is None:
749 # Rebuild entire dirstate
744 # Rebuild entire dirstate
750 changedfiles = allfiles
745 changedfiles = allfiles
751 lastnormaltime = self._lastnormaltime
746 lastnormaltime = self._lastnormaltime
752 self.clear()
747 self.clear()
753 self._lastnormaltime = lastnormaltime
748 self._lastnormaltime = lastnormaltime
754
749
755 if self._origpl is None:
750 if self._origpl is None:
756 self._origpl = self._pl
751 self._origpl = self._pl
757 self._pl = (parent, nullid)
752 self._pl = (parent, nullid)
758 for f in changedfiles:
753 for f in changedfiles:
759 if f in allfiles:
754 if f in allfiles:
760 self.normallookup(f)
755 self.normallookup(f)
761 else:
756 else:
762 self.drop(f)
757 self.drop(f)
763
758
764 self._dirty = True
759 self._dirty = True
765
760
766 def identity(self):
761 def identity(self):
767 '''Return identity of dirstate itself to detect changing in storage
762 '''Return identity of dirstate itself to detect changing in storage
768
763
769 If identity of previous dirstate is equal to this, writing
764 If identity of previous dirstate is equal to this, writing
770 changes based on the former dirstate out can keep consistency.
765 changes based on the former dirstate out can keep consistency.
771 '''
766 '''
772 return self._identity
767 return self._identity
773
768
774 def write(self, tr):
769 def write(self, tr):
775 if not self._dirty:
770 if not self._dirty:
776 return
771 return
777
772
778 filename = self._filename
773 filename = self._filename
779 if tr:
774 if tr:
780 # 'dirstate.write()' is not only for writing in-memory
775 # 'dirstate.write()' is not only for writing in-memory
781 # changes out, but also for dropping ambiguous timestamp.
776 # changes out, but also for dropping ambiguous timestamp.
782 # delayed writing re-raise "ambiguous timestamp issue".
777 # delayed writing re-raise "ambiguous timestamp issue".
783 # See also the wiki page below for detail:
778 # See also the wiki page below for detail:
784 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
779 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
785
780
786 # emulate dropping timestamp in 'parsers.pack_dirstate'
781 # emulate dropping timestamp in 'parsers.pack_dirstate'
787 now = _getfsnow(self._opener)
782 now = _getfsnow(self._opener)
788 dmap = self._map
783 dmap = self._map
789 for f in self._updatedfiles:
784 for f in self._updatedfiles:
790 e = dmap.get(f)
785 e = dmap.get(f)
791 if e is not None and e[0] == 'n' and e[3] == now:
786 if e is not None and e[0] == 'n' and e[3] == now:
792 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
787 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
793 self._nonnormalset.add(f)
788 self._nonnormalset.add(f)
794
789
795 # emulate that all 'dirstate.normal' results are written out
790 # emulate that all 'dirstate.normal' results are written out
796 self._lastnormaltime = 0
791 self._lastnormaltime = 0
797 self._updatedfiles.clear()
792 self._updatedfiles.clear()
798
793
799 # delay writing in-memory changes out
794 # delay writing in-memory changes out
800 tr.addfilegenerator('dirstate', (self._filename,),
795 tr.addfilegenerator('dirstate', (self._filename,),
801 self._writedirstate, location='plain')
796 self._writedirstate, location='plain')
802 return
797 return
803
798
804 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
799 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
805 self._writedirstate(st)
800 self._writedirstate(st)
806
801
807 def addparentchangecallback(self, category, callback):
802 def addparentchangecallback(self, category, callback):
808 """add a callback to be called when the wd parents are changed
803 """add a callback to be called when the wd parents are changed
809
804
810 Callback will be called with the following arguments:
805 Callback will be called with the following arguments:
811 dirstate, (oldp1, oldp2), (newp1, newp2)
806 dirstate, (oldp1, oldp2), (newp1, newp2)
812
807
813 Category is a unique identifier to allow overwriting an old callback
808 Category is a unique identifier to allow overwriting an old callback
814 with a newer callback.
809 with a newer callback.
815 """
810 """
816 self._plchangecallbacks[category] = callback
811 self._plchangecallbacks[category] = callback
817
812
818 def _writedirstate(self, st):
813 def _writedirstate(self, st):
819 # notify callbacks about parents change
814 # notify callbacks about parents change
820 if self._origpl is not None and self._origpl != self._pl:
815 if self._origpl is not None and self._origpl != self._pl:
821 for c, callback in sorted(self._plchangecallbacks.iteritems()):
816 for c, callback in sorted(self._plchangecallbacks.iteritems()):
822 callback(self, self._origpl, self._pl)
817 callback(self, self._origpl, self._pl)
823 self._origpl = None
818 self._origpl = None
824 # use the modification time of the newly created temporary file as the
819 # use the modification time of the newly created temporary file as the
825 # filesystem's notion of 'now'
820 # filesystem's notion of 'now'
826 now = util.fstat(st).st_mtime & _rangemask
821 now = util.fstat(st).st_mtime & _rangemask
827
822
828 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
823 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
829 # timestamp of each entries in dirstate, because of 'now > mtime'
824 # timestamp of each entries in dirstate, because of 'now > mtime'
830 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
825 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
831 if delaywrite > 0:
826 if delaywrite > 0:
832 # do we have any files to delay for?
827 # do we have any files to delay for?
833 for f, e in self._map.iteritems():
828 for f, e in self._map.iteritems():
834 if e[0] == 'n' and e[3] == now:
829 if e[0] == 'n' and e[3] == now:
835 import time # to avoid useless import
830 import time # to avoid useless import
836 # rather than sleep n seconds, sleep until the next
831 # rather than sleep n seconds, sleep until the next
837 # multiple of n seconds
832 # multiple of n seconds
838 clock = time.time()
833 clock = time.time()
839 start = int(clock) - (int(clock) % delaywrite)
834 start = int(clock) - (int(clock) % delaywrite)
840 end = start + delaywrite
835 end = start + delaywrite
841 time.sleep(end - clock)
836 time.sleep(end - clock)
842 now = end # trust our estimate that the end is near now
837 now = end # trust our estimate that the end is near now
843 break
838 break
844
839
845 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
840 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
846 self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
841 self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
847 st.close()
842 st.close()
848 self._lastnormaltime = 0
843 self._lastnormaltime = 0
849 self._dirty = self._dirtypl = False
844 self._dirty = self._dirtypl = False
850
845
851 def _dirignore(self, f):
846 def _dirignore(self, f):
852 if f == '.':
847 if f == '.':
853 return False
848 return False
854 if self._ignore(f):
849 if self._ignore(f):
855 return True
850 return True
856 for p in util.finddirs(f):
851 for p in util.finddirs(f):
857 if self._ignore(p):
852 if self._ignore(p):
858 return True
853 return True
859 return False
854 return False
860
855
861 def _ignorefiles(self):
856 def _ignorefiles(self):
862 files = []
857 files = []
863 if os.path.exists(self._join('.hgignore')):
858 if os.path.exists(self._join('.hgignore')):
864 files.append(self._join('.hgignore'))
859 files.append(self._join('.hgignore'))
865 for name, path in self._ui.configitems("ui"):
860 for name, path in self._ui.configitems("ui"):
866 if name == 'ignore' or name.startswith('ignore.'):
861 if name == 'ignore' or name.startswith('ignore.'):
867 # we need to use os.path.join here rather than self._join
862 # we need to use os.path.join here rather than self._join
868 # because path is arbitrary and user-specified
863 # because path is arbitrary and user-specified
869 files.append(os.path.join(self._rootdir, util.expandpath(path)))
864 files.append(os.path.join(self._rootdir, util.expandpath(path)))
870 return files
865 return files
871
866
872 def _ignorefileandline(self, f):
867 def _ignorefileandline(self, f):
873 files = collections.deque(self._ignorefiles())
868 files = collections.deque(self._ignorefiles())
874 visited = set()
869 visited = set()
875 while files:
870 while files:
876 i = files.popleft()
871 i = files.popleft()
877 patterns = matchmod.readpatternfile(i, self._ui.warn,
872 patterns = matchmod.readpatternfile(i, self._ui.warn,
878 sourceinfo=True)
873 sourceinfo=True)
879 for pattern, lineno, line in patterns:
874 for pattern, lineno, line in patterns:
880 kind, p = matchmod._patsplit(pattern, 'glob')
875 kind, p = matchmod._patsplit(pattern, 'glob')
881 if kind == "subinclude":
876 if kind == "subinclude":
882 if p not in visited:
877 if p not in visited:
883 files.append(p)
878 files.append(p)
884 continue
879 continue
885 m = matchmod.match(self._root, '', [], [pattern],
880 m = matchmod.match(self._root, '', [], [pattern],
886 warn=self._ui.warn)
881 warn=self._ui.warn)
887 if m(f):
882 if m(f):
888 return (i, lineno, line)
883 return (i, lineno, line)
889 visited.add(i)
884 visited.add(i)
890 return (None, -1, "")
885 return (None, -1, "")
891
886
    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or '.' in files:
            files = ['.']
        results = dict.fromkeys(subrepos)
        results['.hg'] = None

        alldirs = None
        for ff in files:
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['.']
            if normalize and ff != '.':
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if alldirs is None:
                        alldirs = util.dirs(dmap)
                    if nf in alldirs:
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, inst.strerror)

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in results.iteritems():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.iteritems():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(path, norm, True, None,
                                                    self._dirfoldmap)
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound

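    # A minimal sketch (not part of the original module) of what the
    # _walkexplicit() triple can look like. `ds` and `m` are assumed to be a
    # dirstate instance and an exact matcher over ['a.txt', 'somedir'] built
    # by the caller; the exact matcher API depends on the Mercurial version:
    #
    #   results, dirsfound, dirsnotfound = ds._walkexplicit(m, subrepos=[])
    #   # results      -> {'.hg': None, 'a.txt': <lstat result>}
    #   # dirsfound    -> [('somedir', 'somedir')]  if somedir exists on disk
    #   # dirsnotfound -> ['somedir']               if it only exists in dmap
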
    def walk(self, match, subrepos, unknown, ignored, full=True):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        '''
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always
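        # Summary of the branches above:
        #   ignored=True  -> nothing is treated as ignored, so ignored files
        #                    are walked and reported too
        #   unknown=True  -> honour the real ignore rules (self._ignore)
        #   neither       -> treat everything as ignored, which prunes the
        #                    directory recursion done in step 2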

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                nd = work.pop()
                if not match.visitdir(nd):
                    continue
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd), inst.strerror)
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that traverse() doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

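        # Note: traverse() above drains an explicit work stack (work.pop() /
        # wadd) instead of recursing, so deeply nested directory trees cannot
        # hit Python's recursion limit.
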
        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matched by matchfn, b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory,
                        # which we report as missing in this case.
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results

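    # A minimal usage sketch for walk() (illustrative only; `ds` is assumed to
    # be a dirstate instance and `m` a matcher built by the caller):
    #
    #   results = ds.walk(m, subrepos=[], unknown=True, ignored=False)
    #   for fn, st in sorted(results.iteritems()):
    #       # st is a stat-like object for entries seen on disk, or None for
    #       # entries only known to the dirstate (or pruned for case reasons)
    #       ...
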
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        ladd = lookup.append # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

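            # Each entry is a dirstatetuple of (state, mode, size, mtime):
            # state is 'n' (normal), 'a' (added), 'r' (removed) or 'm'
            # (merged), and negative sizes mark special cases -- e.g. the
            # size == -2 check below means "taken from the other parent".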
            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))

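    # Illustrative (hypothetical) caller of status(); `ds` and `m` are assumed
    # to exist elsewhere, and scmutil.status exposes the seven lists passed
    # above as attributes:
    #
    #   unsure, st = ds.status(m, subrepos=[], ignored=False, clean=False,
    #                          unknown=True)
    #   # `unsure` entries need a content comparison to classify for certain;
    #   # st.modified / st.added / st.removed are definite working-copy changes
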
    def matches(self, match):
        '''
        return files in the dirstate (in whatever state) filtered by match
        '''
        dmap = self._map
        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files is
            # much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just return
            # that
            return list(files)
        return [f for f in dmap if match(f)]

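    # For example (sketch only): with an exact matcher over ['a.txt', 'b.txt'],
    # matches() returns just those of the two paths that are tracked in the
    # dirstate, while an always-matching matcher returns every tracked file
    # via dmap.keys().
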
    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to make sure the changes are
        # written out, because the latter skips writing while a transaction is
        # running. The output file will be used to create the backup of the
        # dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(self._opener(filename, "w", atomictemp=True,
                                             checkambig=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that the pending file written above is unlinked on
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(self._opener.join(filename),
                      self._opener.join(backupname), hardlink=True)

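    # Typical lifecycle (a sketch inferred from the methods here, not a
    # verbatim Mercurial caller): savebackup() is called with the wlock held
    # before a risky operation; restorebackup() rolls the dirstate file back
    # on failure, and clearbackup() removes the backup once the operation has
    # succeeded.
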
    def restorebackup(self, tr, backupname):
        '''Restore dirstate from backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        self._opener.rename(backupname, filename, checkambig=True)

    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        self._opener.unlink(backupname)