##// END OF EJS Templates
dirstate: create new dirstatemap class...
Durham Goode -
r34333:b36881c6 default
parent child Browse files
Show More
@@ -1,1341 +1,1371 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18 from . import (
18 from . import (
19 encoding,
19 encoding,
20 error,
20 error,
21 match as matchmod,
21 match as matchmod,
22 pathutil,
22 pathutil,
23 policy,
23 policy,
24 pycompat,
24 pycompat,
25 scmutil,
25 scmutil,
26 txnutil,
26 txnutil,
27 util,
27 util,
28 )
28 )
29
29
# Module-level bindings shared by the rest of this file.
# 'parsers' is the C (or pure-Python fallback) extension module selected by
# the module policy; it supplies the fast dirstate parsing primitives.
parsers = policy.importmod(r'parsers')

# Short aliases for frequently used decorators/constants.
propertycache = util.propertycache
filecache = scmutil.filecache
# Mask used to clamp size/mtime values into the 31-bit range the on-disk
# dirstate format can store.
_rangemask = 0x7fffffff

# Tuple type (state, mode, size, mtime) used for each dirstate entry.
dirstatetuple = parsers.dirstatetuple
37
37
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # Resolve fname relative to the repository's .hg/ opener.
        opener = obj._opener
        return opener.join(fname)
42
42
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # Resolve fname relative to the working-directory root.
        return obj._join(fname)
47
47
def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    # Create a scratch file through the vfs and read its mtime back: this
    # yields "now" as the *filesystem* sees it (its clock and granularity),
    # which can differ from the system clock.
    fd, scratch = vfs.mkstemp()
    try:
        return os.fstat(fd).st_mtime
    finally:
        # Always clean up the scratch file, even if fstat fails.
        os.close(fd)
        vfs.unlink(scratch)
56
56
def nonnormalentries(dmap):
    '''Compute the nonnormal dirstate entries from the dmap'''
    try:
        # Fast path: the C parsers module computes both sets in one pass
        # over the raw dict held by the dirstatemap wrapper.
        return parsers.nonnormalotherparententries(dmap._map)
    except AttributeError:
        # Pure-Python fallback. An entry is "nonnormal" when its state is
        # not 'n' or its mtime is the -1 "needs lookup" marker; it is an
        # "otherparent" entry when state is 'n' with the -2 size marker.
        nonnorm = set(fname for fname, e in dmap.iteritems()
                      if e[0] != 'n' or e[3] == -1)
        otherparent = set(fname for fname, e in dmap.iteritems()
                          if e[0] == 'n' and e[2] == -2)
        return nonnorm, otherparent
70
70
71 class dirstate(object):
71 class dirstate(object):
72
72
73 def __init__(self, opener, ui, root, validate, sparsematchfn):
73 def __init__(self, opener, ui, root, validate, sparsematchfn):
74 '''Create a new dirstate object.
74 '''Create a new dirstate object.
75
75
76 opener is an open()-like callable that can be used to open the
76 opener is an open()-like callable that can be used to open the
77 dirstate file; root is the root of the directory tracked by
77 dirstate file; root is the root of the directory tracked by
78 the dirstate.
78 the dirstate.
79 '''
79 '''
80 self._opener = opener
80 self._opener = opener
81 self._validate = validate
81 self._validate = validate
82 self._root = root
82 self._root = root
83 self._sparsematchfn = sparsematchfn
83 self._sparsematchfn = sparsematchfn
84 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
84 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
85 # UNC path pointing to root share (issue4557)
85 # UNC path pointing to root share (issue4557)
86 self._rootdir = pathutil.normasprefix(root)
86 self._rootdir = pathutil.normasprefix(root)
87 self._dirty = False
87 self._dirty = False
88 self._dirtypl = False
88 self._dirtypl = False
89 self._lastnormaltime = 0
89 self._lastnormaltime = 0
90 self._ui = ui
90 self._ui = ui
91 self._filecache = {}
91 self._filecache = {}
92 self._parentwriters = 0
92 self._parentwriters = 0
93 self._filename = 'dirstate'
93 self._filename = 'dirstate'
94 self._pendingfilename = '%s.pending' % self._filename
94 self._pendingfilename = '%s.pending' % self._filename
95 self._plchangecallbacks = {}
95 self._plchangecallbacks = {}
96 self._origpl = None
96 self._origpl = None
97 self._updatedfiles = set()
97 self._updatedfiles = set()
98
98
99 # for consistent view between _pl() and _read() invocations
99 # for consistent view between _pl() and _read() invocations
100 self._pendingmode = None
100 self._pendingmode = None
101
101
102 @contextlib.contextmanager
102 @contextlib.contextmanager
103 def parentchange(self):
103 def parentchange(self):
104 '''Context manager for handling dirstate parents.
104 '''Context manager for handling dirstate parents.
105
105
106 If an exception occurs in the scope of the context manager,
106 If an exception occurs in the scope of the context manager,
107 the incoherent dirstate won't be written when wlock is
107 the incoherent dirstate won't be written when wlock is
108 released.
108 released.
109 '''
109 '''
110 self._parentwriters += 1
110 self._parentwriters += 1
111 yield
111 yield
112 # Typically we want the "undo" step of a context manager in a
112 # Typically we want the "undo" step of a context manager in a
113 # finally block so it happens even when an exception
113 # finally block so it happens even when an exception
114 # occurs. In this case, however, we only want to decrement
114 # occurs. In this case, however, we only want to decrement
115 # parentwriters if the code in the with statement exits
115 # parentwriters if the code in the with statement exits
116 # normally, so we don't have a try/finally here on purpose.
116 # normally, so we don't have a try/finally here on purpose.
117 self._parentwriters -= 1
117 self._parentwriters -= 1
118
118
119 def beginparentchange(self):
119 def beginparentchange(self):
120 '''Marks the beginning of a set of changes that involve changing
120 '''Marks the beginning of a set of changes that involve changing
121 the dirstate parents. If there is an exception during this time,
121 the dirstate parents. If there is an exception during this time,
122 the dirstate will not be written when the wlock is released. This
122 the dirstate will not be written when the wlock is released. This
123 prevents writing an incoherent dirstate where the parent doesn't
123 prevents writing an incoherent dirstate where the parent doesn't
124 match the contents.
124 match the contents.
125 '''
125 '''
126 self._ui.deprecwarn('beginparentchange is obsoleted by the '
126 self._ui.deprecwarn('beginparentchange is obsoleted by the '
127 'parentchange context manager.', '4.3')
127 'parentchange context manager.', '4.3')
128 self._parentwriters += 1
128 self._parentwriters += 1
129
129
130 def endparentchange(self):
130 def endparentchange(self):
131 '''Marks the end of a set of changes that involve changing the
131 '''Marks the end of a set of changes that involve changing the
132 dirstate parents. Once all parent changes have been marked done,
132 dirstate parents. Once all parent changes have been marked done,
133 the wlock will be free to write the dirstate on release.
133 the wlock will be free to write the dirstate on release.
134 '''
134 '''
135 self._ui.deprecwarn('endparentchange is obsoleted by the '
135 self._ui.deprecwarn('endparentchange is obsoleted by the '
136 'parentchange context manager.', '4.3')
136 'parentchange context manager.', '4.3')
137 if self._parentwriters > 0:
137 if self._parentwriters > 0:
138 self._parentwriters -= 1
138 self._parentwriters -= 1
139
139
140 def pendingparentchange(self):
140 def pendingparentchange(self):
141 '''Returns true if the dirstate is in the middle of a set of changes
141 '''Returns true if the dirstate is in the middle of a set of changes
142 that modify the dirstate parent.
142 that modify the dirstate parent.
143 '''
143 '''
144 return self._parentwriters > 0
144 return self._parentwriters > 0
145
145
146 @propertycache
146 @propertycache
147 def _map(self):
147 def _map(self):
148 '''Return the dirstate contents as a map from filename to
148 '''Return the dirstate contents as a map from filename to
149 (state, mode, size, time).'''
149 (state, mode, size, time).'''
150 self._read()
150 self._read()
151 return self._map
151 return self._map
152
152
153 @propertycache
153 @propertycache
154 def _copymap(self):
154 def _copymap(self):
155 self._read()
155 self._read()
156 return self._copymap
156 return self._copymap
157
157
158 @propertycache
158 @propertycache
159 def _identity(self):
159 def _identity(self):
160 self._read()
160 self._read()
161 return self._identity
161 return self._identity
162
162
163 @propertycache
163 @propertycache
164 def _nonnormalset(self):
164 def _nonnormalset(self):
165 nonnorm, otherparents = nonnormalentries(self._map)
165 nonnorm, otherparents = nonnormalentries(self._map)
166 self._otherparentset = otherparents
166 self._otherparentset = otherparents
167 return nonnorm
167 return nonnorm
168
168
169 @propertycache
169 @propertycache
170 def _otherparentset(self):
170 def _otherparentset(self):
171 nonnorm, otherparents = nonnormalentries(self._map)
171 nonnorm, otherparents = nonnormalentries(self._map)
172 self._nonnormalset = nonnorm
172 self._nonnormalset = nonnorm
173 return otherparents
173 return otherparents
174
174
175 @propertycache
175 @propertycache
176 def _filefoldmap(self):
176 def _filefoldmap(self):
177 try:
177 try:
178 makefilefoldmap = parsers.make_file_foldmap
178 makefilefoldmap = parsers.make_file_foldmap
179 except AttributeError:
179 except AttributeError:
180 pass
180 pass
181 else:
181 else:
182 return makefilefoldmap(self._map, util.normcasespec,
182 return makefilefoldmap(self._map._map, util.normcasespec,
183 util.normcasefallback)
183 util.normcasefallback)
184
184
185 f = {}
185 f = {}
186 normcase = util.normcase
186 normcase = util.normcase
187 for name, s in self._map.iteritems():
187 for name, s in self._map.iteritems():
188 if s[0] != 'r':
188 if s[0] != 'r':
189 f[normcase(name)] = name
189 f[normcase(name)] = name
190 f['.'] = '.' # prevents useless util.fspath() invocation
190 f['.'] = '.' # prevents useless util.fspath() invocation
191 return f
191 return f
192
192
193 @propertycache
193 @propertycache
194 def _dirfoldmap(self):
194 def _dirfoldmap(self):
195 f = {}
195 f = {}
196 normcase = util.normcase
196 normcase = util.normcase
197 for name in self._dirs:
197 for name in self._dirs:
198 f[normcase(name)] = name
198 f[normcase(name)] = name
199 return f
199 return f
200
200
201 @property
201 @property
202 def _sparsematcher(self):
202 def _sparsematcher(self):
203 """The matcher for the sparse checkout.
203 """The matcher for the sparse checkout.
204
204
205 The working directory may not include every file from a manifest. The
205 The working directory may not include every file from a manifest. The
206 matcher obtained by this property will match a path if it is to be
206 matcher obtained by this property will match a path if it is to be
207 included in the working directory.
207 included in the working directory.
208 """
208 """
209 # TODO there is potential to cache this property. For now, the matcher
209 # TODO there is potential to cache this property. For now, the matcher
210 # is resolved on every access. (But the called function does use a
210 # is resolved on every access. (But the called function does use a
211 # cache to keep the lookup fast.)
211 # cache to keep the lookup fast.)
212 return self._sparsematchfn()
212 return self._sparsematchfn()
213
213
214 @repocache('branch')
214 @repocache('branch')
215 def _branch(self):
215 def _branch(self):
216 try:
216 try:
217 return self._opener.read("branch").strip() or "default"
217 return self._opener.read("branch").strip() or "default"
218 except IOError as inst:
218 except IOError as inst:
219 if inst.errno != errno.ENOENT:
219 if inst.errno != errno.ENOENT:
220 raise
220 raise
221 return "default"
221 return "default"
222
222
223 @propertycache
223 @propertycache
224 def _pl(self):
224 def _pl(self):
225 try:
225 try:
226 fp = self._opendirstatefile()
226 fp = self._opendirstatefile()
227 st = fp.read(40)
227 st = fp.read(40)
228 fp.close()
228 fp.close()
229 l = len(st)
229 l = len(st)
230 if l == 40:
230 if l == 40:
231 return st[:20], st[20:40]
231 return st[:20], st[20:40]
232 elif l > 0 and l < 40:
232 elif l > 0 and l < 40:
233 raise error.Abort(_('working directory state appears damaged!'))
233 raise error.Abort(_('working directory state appears damaged!'))
234 except IOError as err:
234 except IOError as err:
235 if err.errno != errno.ENOENT:
235 if err.errno != errno.ENOENT:
236 raise
236 raise
237 return [nullid, nullid]
237 return [nullid, nullid]
238
238
239 @propertycache
239 @propertycache
240 def _dirs(self):
240 def _dirs(self):
241 return util.dirs(self._map, 'r')
241 return util.dirs(self._map._map, 'r')
242
242
243 def dirs(self):
243 def dirs(self):
244 return self._dirs
244 return self._dirs
245
245
246 @rootcache('.hgignore')
246 @rootcache('.hgignore')
247 def _ignore(self):
247 def _ignore(self):
248 files = self._ignorefiles()
248 files = self._ignorefiles()
249 if not files:
249 if not files:
250 return matchmod.never(self._root, '')
250 return matchmod.never(self._root, '')
251
251
252 pats = ['include:%s' % f for f in files]
252 pats = ['include:%s' % f for f in files]
253 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
253 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
254
254
255 @propertycache
255 @propertycache
256 def _slash(self):
256 def _slash(self):
257 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
257 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
258
258
259 @propertycache
259 @propertycache
260 def _checklink(self):
260 def _checklink(self):
261 return util.checklink(self._root)
261 return util.checklink(self._root)
262
262
263 @propertycache
263 @propertycache
264 def _checkexec(self):
264 def _checkexec(self):
265 return util.checkexec(self._root)
265 return util.checkexec(self._root)
266
266
267 @propertycache
267 @propertycache
268 def _checkcase(self):
268 def _checkcase(self):
269 return not util.fscasesensitive(self._join('.hg'))
269 return not util.fscasesensitive(self._join('.hg'))
270
270
271 def _join(self, f):
271 def _join(self, f):
272 # much faster than os.path.join()
272 # much faster than os.path.join()
273 # it's safe because f is always a relative path
273 # it's safe because f is always a relative path
274 return self._rootdir + f
274 return self._rootdir + f
275
275
276 def flagfunc(self, buildfallback):
276 def flagfunc(self, buildfallback):
277 if self._checklink and self._checkexec:
277 if self._checklink and self._checkexec:
278 def f(x):
278 def f(x):
279 try:
279 try:
280 st = os.lstat(self._join(x))
280 st = os.lstat(self._join(x))
281 if util.statislink(st):
281 if util.statislink(st):
282 return 'l'
282 return 'l'
283 if util.statisexec(st):
283 if util.statisexec(st):
284 return 'x'
284 return 'x'
285 except OSError:
285 except OSError:
286 pass
286 pass
287 return ''
287 return ''
288 return f
288 return f
289
289
290 fallback = buildfallback()
290 fallback = buildfallback()
291 if self._checklink:
291 if self._checklink:
292 def f(x):
292 def f(x):
293 if os.path.islink(self._join(x)):
293 if os.path.islink(self._join(x)):
294 return 'l'
294 return 'l'
295 if 'x' in fallback(x):
295 if 'x' in fallback(x):
296 return 'x'
296 return 'x'
297 return ''
297 return ''
298 return f
298 return f
299 if self._checkexec:
299 if self._checkexec:
300 def f(x):
300 def f(x):
301 if 'l' in fallback(x):
301 if 'l' in fallback(x):
302 return 'l'
302 return 'l'
303 if util.isexec(self._join(x)):
303 if util.isexec(self._join(x)):
304 return 'x'
304 return 'x'
305 return ''
305 return ''
306 return f
306 return f
307 else:
307 else:
308 return fallback
308 return fallback
309
309
310 @propertycache
310 @propertycache
311 def _cwd(self):
311 def _cwd(self):
312 # internal config: ui.forcecwd
312 # internal config: ui.forcecwd
313 forcecwd = self._ui.config('ui', 'forcecwd')
313 forcecwd = self._ui.config('ui', 'forcecwd')
314 if forcecwd:
314 if forcecwd:
315 return forcecwd
315 return forcecwd
316 return pycompat.getcwd()
316 return pycompat.getcwd()
317
317
318 def getcwd(self):
318 def getcwd(self):
319 '''Return the path from which a canonical path is calculated.
319 '''Return the path from which a canonical path is calculated.
320
320
321 This path should be used to resolve file patterns or to convert
321 This path should be used to resolve file patterns or to convert
322 canonical paths back to file paths for display. It shouldn't be
322 canonical paths back to file paths for display. It shouldn't be
323 used to get real file paths. Use vfs functions instead.
323 used to get real file paths. Use vfs functions instead.
324 '''
324 '''
325 cwd = self._cwd
325 cwd = self._cwd
326 if cwd == self._root:
326 if cwd == self._root:
327 return ''
327 return ''
328 # self._root ends with a path separator if self._root is '/' or 'C:\'
328 # self._root ends with a path separator if self._root is '/' or 'C:\'
329 rootsep = self._root
329 rootsep = self._root
330 if not util.endswithsep(rootsep):
330 if not util.endswithsep(rootsep):
331 rootsep += pycompat.ossep
331 rootsep += pycompat.ossep
332 if cwd.startswith(rootsep):
332 if cwd.startswith(rootsep):
333 return cwd[len(rootsep):]
333 return cwd[len(rootsep):]
334 else:
334 else:
335 # we're outside the repo. return an absolute path.
335 # we're outside the repo. return an absolute path.
336 return cwd
336 return cwd
337
337
338 def pathto(self, f, cwd=None):
338 def pathto(self, f, cwd=None):
339 if cwd is None:
339 if cwd is None:
340 cwd = self.getcwd()
340 cwd = self.getcwd()
341 path = util.pathto(self._root, cwd, f)
341 path = util.pathto(self._root, cwd, f)
342 if self._slash:
342 if self._slash:
343 return util.pconvert(path)
343 return util.pconvert(path)
344 return path
344 return path
345
345
346 def __getitem__(self, key):
346 def __getitem__(self, key):
347 '''Return the current state of key (a filename) in the dirstate.
347 '''Return the current state of key (a filename) in the dirstate.
348
348
349 States are:
349 States are:
350 n normal
350 n normal
351 m needs merging
351 m needs merging
352 r marked for removal
352 r marked for removal
353 a marked for addition
353 a marked for addition
354 ? not tracked
354 ? not tracked
355 '''
355 '''
356 return self._map.get(key, ("?",))[0]
356 return self._map.get(key, ("?",))[0]
357
357
358 def __contains__(self, key):
358 def __contains__(self, key):
359 return key in self._map
359 return key in self._map
360
360
361 def __iter__(self):
361 def __iter__(self):
362 return iter(sorted(self._map))
362 return iter(sorted(self._map))
363
363
364 def items(self):
364 def items(self):
365 return self._map.iteritems()
365 return self._map.iteritems()
366
366
367 iteritems = items
367 iteritems = items
368
368
369 def parents(self):
369 def parents(self):
370 return [self._validate(p) for p in self._pl]
370 return [self._validate(p) for p in self._pl]
371
371
372 def p1(self):
372 def p1(self):
373 return self._validate(self._pl[0])
373 return self._validate(self._pl[0])
374
374
375 def p2(self):
375 def p2(self):
376 return self._validate(self._pl[1])
376 return self._validate(self._pl[1])
377
377
378 def branch(self):
378 def branch(self):
379 return encoding.tolocal(self._branch)
379 return encoding.tolocal(self._branch)
380
380
381 def setparents(self, p1, p2=nullid):
381 def setparents(self, p1, p2=nullid):
382 """Set dirstate parents to p1 and p2.
382 """Set dirstate parents to p1 and p2.
383
383
384 When moving from two parents to one, 'm' merged entries a
384 When moving from two parents to one, 'm' merged entries a
385 adjusted to normal and previous copy records discarded and
385 adjusted to normal and previous copy records discarded and
386 returned by the call.
386 returned by the call.
387
387
388 See localrepo.setparents()
388 See localrepo.setparents()
389 """
389 """
390 if self._parentwriters == 0:
390 if self._parentwriters == 0:
391 raise ValueError("cannot set dirstate parent without "
391 raise ValueError("cannot set dirstate parent without "
392 "calling dirstate.beginparentchange")
392 "calling dirstate.beginparentchange")
393
393
394 self._dirty = self._dirtypl = True
394 self._dirty = self._dirtypl = True
395 oldp2 = self._pl[1]
395 oldp2 = self._pl[1]
396 if self._origpl is None:
396 if self._origpl is None:
397 self._origpl = self._pl
397 self._origpl = self._pl
398 self._pl = p1, p2
398 self._pl = p1, p2
399 copies = {}
399 copies = {}
400 if oldp2 != nullid and p2 == nullid:
400 if oldp2 != nullid and p2 == nullid:
401 candidatefiles = self._nonnormalset.union(self._otherparentset)
401 candidatefiles = self._nonnormalset.union(self._otherparentset)
402 for f in candidatefiles:
402 for f in candidatefiles:
403 s = self._map.get(f)
403 s = self._map.get(f)
404 if s is None:
404 if s is None:
405 continue
405 continue
406
406
407 # Discard 'm' markers when moving away from a merge state
407 # Discard 'm' markers when moving away from a merge state
408 if s[0] == 'm':
408 if s[0] == 'm':
409 source = self._copymap.get(f)
409 source = self._copymap.get(f)
410 if source:
410 if source:
411 copies[f] = source
411 copies[f] = source
412 self.normallookup(f)
412 self.normallookup(f)
413 # Also fix up otherparent markers
413 # Also fix up otherparent markers
414 elif s[0] == 'n' and s[2] == -2:
414 elif s[0] == 'n' and s[2] == -2:
415 source = self._copymap.get(f)
415 source = self._copymap.get(f)
416 if source:
416 if source:
417 copies[f] = source
417 copies[f] = source
418 self.add(f)
418 self.add(f)
419 return copies
419 return copies
420
420
421 def setbranch(self, branch):
421 def setbranch(self, branch):
422 self._branch = encoding.fromlocal(branch)
422 self._branch = encoding.fromlocal(branch)
423 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
423 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
424 try:
424 try:
425 f.write(self._branch + '\n')
425 f.write(self._branch + '\n')
426 f.close()
426 f.close()
427
427
428 # make sure filecache has the correct stat info for _branch after
428 # make sure filecache has the correct stat info for _branch after
429 # replacing the underlying file
429 # replacing the underlying file
430 ce = self._filecache['_branch']
430 ce = self._filecache['_branch']
431 if ce:
431 if ce:
432 ce.refresh()
432 ce.refresh()
433 except: # re-raises
433 except: # re-raises
434 f.discard()
434 f.discard()
435 raise
435 raise
436
436
437 def _opendirstatefile(self):
437 def _opendirstatefile(self):
438 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
438 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
439 if self._pendingmode is not None and self._pendingmode != mode:
439 if self._pendingmode is not None and self._pendingmode != mode:
440 fp.close()
440 fp.close()
441 raise error.Abort(_('working directory state may be '
441 raise error.Abort(_('working directory state may be '
442 'changed parallelly'))
442 'changed parallelly'))
443 self._pendingmode = mode
443 self._pendingmode = mode
444 return fp
444 return fp
445
445
446 def _read(self):
446 def _read(self):
447 self._map = {}
447 self._map = dirstatemap()
448
448 self._copymap = {}
449 self._copymap = {}
449 # ignore HG_PENDING because identity is used only for writing
450 # ignore HG_PENDING because identity is used only for writing
450 self._identity = util.filestat.frompath(
451 self._identity = util.filestat.frompath(
451 self._opener.join(self._filename))
452 self._opener.join(self._filename))
452 try:
453 try:
453 fp = self._opendirstatefile()
454 fp = self._opendirstatefile()
454 try:
455 try:
455 st = fp.read()
456 st = fp.read()
456 finally:
457 finally:
457 fp.close()
458 fp.close()
458 except IOError as err:
459 except IOError as err:
459 if err.errno != errno.ENOENT:
460 if err.errno != errno.ENOENT:
460 raise
461 raise
461 return
462 return
462 if not st:
463 if not st:
463 return
464 return
464
465
465 if util.safehasattr(parsers, 'dict_new_presized'):
466 if util.safehasattr(parsers, 'dict_new_presized'):
466 # Make an estimate of the number of files in the dirstate based on
467 # Make an estimate of the number of files in the dirstate based on
467 # its size. From a linear regression on a set of real-world repos,
468 # its size. From a linear regression on a set of real-world repos,
468 # all over 10,000 files, the size of a dirstate entry is 85
469 # all over 10,000 files, the size of a dirstate entry is 85
469 # bytes. The cost of resizing is significantly higher than the cost
470 # bytes. The cost of resizing is significantly higher than the cost
470 # of filling in a larger presized dict, so subtract 20% from the
471 # of filling in a larger presized dict, so subtract 20% from the
471 # size.
472 # size.
472 #
473 #
473 # This heuristic is imperfect in many ways, so in a future dirstate
474 # This heuristic is imperfect in many ways, so in a future dirstate
474 # format update it makes sense to just record the number of entries
475 # format update it makes sense to just record the number of entries
475 # on write.
476 # on write.
476 self._map = parsers.dict_new_presized(len(st) / 71)
477 self._map._map = parsers.dict_new_presized(len(st) / 71)
477
478
478 # Python's garbage collector triggers a GC each time a certain number
479 # Python's garbage collector triggers a GC each time a certain number
479 # of container objects (the number being defined by
480 # of container objects (the number being defined by
480 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
481 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
481 # for each file in the dirstate. The C version then immediately marks
482 # for each file in the dirstate. The C version then immediately marks
482 # them as not to be tracked by the collector. However, this has no
483 # them as not to be tracked by the collector. However, this has no
483 # effect on when GCs are triggered, only on what objects the GC looks
484 # effect on when GCs are triggered, only on what objects the GC looks
484 # into. This means that O(number of files) GCs are unavoidable.
485 # into. This means that O(number of files) GCs are unavoidable.
485 # Depending on when in the process's lifetime the dirstate is parsed,
486 # Depending on when in the process's lifetime the dirstate is parsed,
486 # this can get very expensive. As a workaround, disable GC while
487 # this can get very expensive. As a workaround, disable GC while
487 # parsing the dirstate.
488 # parsing the dirstate.
488 #
489 #
489 # (we cannot decorate the function directly since it is in a C module)
490 # (we cannot decorate the function directly since it is in a C module)
490 parse_dirstate = util.nogc(parsers.parse_dirstate)
491 parse_dirstate = util.nogc(parsers.parse_dirstate)
491 p = parse_dirstate(self._map, self._copymap, st)
492 p = parse_dirstate(self._map._map, self._copymap, st)
492 if not self._dirtypl:
493 if not self._dirtypl:
493 self._pl = p
494 self._pl = p
494
495
495 def invalidate(self):
496 def invalidate(self):
496 '''Causes the next access to reread the dirstate.
497 '''Causes the next access to reread the dirstate.
497
498
498 This is different from localrepo.invalidatedirstate() because it always
499 This is different from localrepo.invalidatedirstate() because it always
499 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
500 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
500 check whether the dirstate has changed before rereading it.'''
501 check whether the dirstate has changed before rereading it.'''
501
502
502 for a in ("_map", "_copymap", "_identity",
503 for a in ("_map", "_copymap", "_identity",
503 "_filefoldmap", "_dirfoldmap", "_branch",
504 "_filefoldmap", "_dirfoldmap", "_branch",
504 "_pl", "_dirs", "_ignore", "_nonnormalset",
505 "_pl", "_dirs", "_ignore", "_nonnormalset",
505 "_otherparentset"):
506 "_otherparentset"):
506 if a in self.__dict__:
507 if a in self.__dict__:
507 delattr(self, a)
508 delattr(self, a)
508 self._lastnormaltime = 0
509 self._lastnormaltime = 0
509 self._dirty = False
510 self._dirty = False
510 self._updatedfiles.clear()
511 self._updatedfiles.clear()
511 self._parentwriters = 0
512 self._parentwriters = 0
512 self._origpl = None
513 self._origpl = None
513
514
514 def copy(self, source, dest):
515 def copy(self, source, dest):
515 """Mark dest as a copy of source. Unmark dest if source is None."""
516 """Mark dest as a copy of source. Unmark dest if source is None."""
516 if source == dest:
517 if source == dest:
517 return
518 return
518 self._dirty = True
519 self._dirty = True
519 if source is not None:
520 if source is not None:
520 self._copymap[dest] = source
521 self._copymap[dest] = source
521 self._updatedfiles.add(source)
522 self._updatedfiles.add(source)
522 self._updatedfiles.add(dest)
523 self._updatedfiles.add(dest)
523 elif self._copymap.pop(dest, None):
524 elif self._copymap.pop(dest, None):
524 self._updatedfiles.add(dest)
525 self._updatedfiles.add(dest)
525
526
526 def copied(self, file):
527 def copied(self, file):
527 return self._copymap.get(file, None)
528 return self._copymap.get(file, None)
528
529
529 def copies(self):
530 def copies(self):
530 return self._copymap
531 return self._copymap
531
532
532 def _droppath(self, f):
533 def _droppath(self, f):
533 if self[f] not in "?r" and "_dirs" in self.__dict__:
534 if self[f] not in "?r" and "_dirs" in self.__dict__:
534 self._dirs.delpath(f)
535 self._dirs.delpath(f)
535
536
536 if "_filefoldmap" in self.__dict__:
537 if "_filefoldmap" in self.__dict__:
537 normed = util.normcase(f)
538 normed = util.normcase(f)
538 if normed in self._filefoldmap:
539 if normed in self._filefoldmap:
539 del self._filefoldmap[normed]
540 del self._filefoldmap[normed]
540
541
541 self._updatedfiles.add(f)
542 self._updatedfiles.add(f)
542
543
543 def _addpath(self, f, state, mode, size, mtime):
544 def _addpath(self, f, state, mode, size, mtime):
544 oldstate = self[f]
545 oldstate = self[f]
545 if state == 'a' or oldstate == 'r':
546 if state == 'a' or oldstate == 'r':
546 scmutil.checkfilename(f)
547 scmutil.checkfilename(f)
547 if f in self._dirs:
548 if f in self._dirs:
548 raise error.Abort(_('directory %r already in dirstate') % f)
549 raise error.Abort(_('directory %r already in dirstate') % f)
549 # shadows
550 # shadows
550 for d in util.finddirs(f):
551 for d in util.finddirs(f):
551 if d in self._dirs:
552 if d in self._dirs:
552 break
553 break
553 entry = self._map.get(d)
554 entry = self._map.get(d)
554 if entry is not None and entry[0] != 'r':
555 if entry is not None and entry[0] != 'r':
555 raise error.Abort(
556 raise error.Abort(
556 _('file %r in dirstate clashes with %r') % (d, f))
557 _('file %r in dirstate clashes with %r') % (d, f))
557 if oldstate in "?r" and "_dirs" in self.__dict__:
558 if oldstate in "?r" and "_dirs" in self.__dict__:
558 self._dirs.addpath(f)
559 self._dirs.addpath(f)
559 self._dirty = True
560 self._dirty = True
560 self._updatedfiles.add(f)
561 self._updatedfiles.add(f)
561 self._map[f] = dirstatetuple(state, mode, size, mtime)
562 self._map[f] = dirstatetuple(state, mode, size, mtime)
562 if state != 'n' or mtime == -1:
563 if state != 'n' or mtime == -1:
563 self._nonnormalset.add(f)
564 self._nonnormalset.add(f)
564 if size == -2:
565 if size == -2:
565 self._otherparentset.add(f)
566 self._otherparentset.add(f)
566
567
567 def normal(self, f):
568 def normal(self, f):
568 '''Mark a file normal and clean.'''
569 '''Mark a file normal and clean.'''
569 s = os.lstat(self._join(f))
570 s = os.lstat(self._join(f))
570 mtime = s.st_mtime
571 mtime = s.st_mtime
571 self._addpath(f, 'n', s.st_mode,
572 self._addpath(f, 'n', s.st_mode,
572 s.st_size & _rangemask, mtime & _rangemask)
573 s.st_size & _rangemask, mtime & _rangemask)
573 self._copymap.pop(f, None)
574 self._copymap.pop(f, None)
574 if f in self._nonnormalset:
575 if f in self._nonnormalset:
575 self._nonnormalset.remove(f)
576 self._nonnormalset.remove(f)
576 if mtime > self._lastnormaltime:
577 if mtime > self._lastnormaltime:
577 # Remember the most recent modification timeslot for status(),
578 # Remember the most recent modification timeslot for status(),
578 # to make sure we won't miss future size-preserving file content
579 # to make sure we won't miss future size-preserving file content
579 # modifications that happen within the same timeslot.
580 # modifications that happen within the same timeslot.
580 self._lastnormaltime = mtime
581 self._lastnormaltime = mtime
581
582
582 def normallookup(self, f):
583 def normallookup(self, f):
583 '''Mark a file normal, but possibly dirty.'''
584 '''Mark a file normal, but possibly dirty.'''
584 if self._pl[1] != nullid:
585 if self._pl[1] != nullid:
585 # if there is a merge going on and the file was either
586 # if there is a merge going on and the file was either
586 # in state 'm' (-1) or coming from other parent (-2) before
587 # in state 'm' (-1) or coming from other parent (-2) before
587 # being removed, restore that state.
588 # being removed, restore that state.
588 entry = self._map.get(f)
589 entry = self._map.get(f)
589 if entry is not None:
590 if entry is not None:
590 if entry[0] == 'r' and entry[2] in (-1, -2):
591 if entry[0] == 'r' and entry[2] in (-1, -2):
591 source = self._copymap.get(f)
592 source = self._copymap.get(f)
592 if entry[2] == -1:
593 if entry[2] == -1:
593 self.merge(f)
594 self.merge(f)
594 elif entry[2] == -2:
595 elif entry[2] == -2:
595 self.otherparent(f)
596 self.otherparent(f)
596 if source:
597 if source:
597 self.copy(source, f)
598 self.copy(source, f)
598 return
599 return
599 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
600 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
600 return
601 return
601 self._addpath(f, 'n', 0, -1, -1)
602 self._addpath(f, 'n', 0, -1, -1)
602 self._copymap.pop(f, None)
603 self._copymap.pop(f, None)
603 if f in self._nonnormalset:
604 if f in self._nonnormalset:
604 self._nonnormalset.remove(f)
605 self._nonnormalset.remove(f)
605
606
606 def otherparent(self, f):
607 def otherparent(self, f):
607 '''Mark as coming from the other parent, always dirty.'''
608 '''Mark as coming from the other parent, always dirty.'''
608 if self._pl[1] == nullid:
609 if self._pl[1] == nullid:
609 raise error.Abort(_("setting %r to other parent "
610 raise error.Abort(_("setting %r to other parent "
610 "only allowed in merges") % f)
611 "only allowed in merges") % f)
611 if f in self and self[f] == 'n':
612 if f in self and self[f] == 'n':
612 # merge-like
613 # merge-like
613 self._addpath(f, 'm', 0, -2, -1)
614 self._addpath(f, 'm', 0, -2, -1)
614 else:
615 else:
615 # add-like
616 # add-like
616 self._addpath(f, 'n', 0, -2, -1)
617 self._addpath(f, 'n', 0, -2, -1)
617 self._copymap.pop(f, None)
618 self._copymap.pop(f, None)
618
619
619 def add(self, f):
620 def add(self, f):
620 '''Mark a file added.'''
621 '''Mark a file added.'''
621 self._addpath(f, 'a', 0, -1, -1)
622 self._addpath(f, 'a', 0, -1, -1)
622 self._copymap.pop(f, None)
623 self._copymap.pop(f, None)
623
624
624 def remove(self, f):
625 def remove(self, f):
625 '''Mark a file removed.'''
626 '''Mark a file removed.'''
626 self._dirty = True
627 self._dirty = True
627 self._droppath(f)
628 self._droppath(f)
628 size = 0
629 size = 0
629 if self._pl[1] != nullid:
630 if self._pl[1] != nullid:
630 entry = self._map.get(f)
631 entry = self._map.get(f)
631 if entry is not None:
632 if entry is not None:
632 # backup the previous state
633 # backup the previous state
633 if entry[0] == 'm': # merge
634 if entry[0] == 'm': # merge
634 size = -1
635 size = -1
635 elif entry[0] == 'n' and entry[2] == -2: # other parent
636 elif entry[0] == 'n' and entry[2] == -2: # other parent
636 size = -2
637 size = -2
637 self._otherparentset.add(f)
638 self._otherparentset.add(f)
638 self._map[f] = dirstatetuple('r', 0, size, 0)
639 self._map[f] = dirstatetuple('r', 0, size, 0)
639 self._nonnormalset.add(f)
640 self._nonnormalset.add(f)
640 if size == 0:
641 if size == 0:
641 self._copymap.pop(f, None)
642 self._copymap.pop(f, None)
642
643
643 def merge(self, f):
644 def merge(self, f):
644 '''Mark a file merged.'''
645 '''Mark a file merged.'''
645 if self._pl[1] == nullid:
646 if self._pl[1] == nullid:
646 return self.normallookup(f)
647 return self.normallookup(f)
647 return self.otherparent(f)
648 return self.otherparent(f)
648
649
649 def drop(self, f):
650 def drop(self, f):
650 '''Drop a file from the dirstate'''
651 '''Drop a file from the dirstate'''
651 if f in self._map:
652 if f in self._map:
652 self._dirty = True
653 self._dirty = True
653 self._droppath(f)
654 self._droppath(f)
654 del self._map[f]
655 del self._map[f]
655 if f in self._nonnormalset:
656 if f in self._nonnormalset:
656 self._nonnormalset.remove(f)
657 self._nonnormalset.remove(f)
657 self._copymap.pop(f, None)
658 self._copymap.pop(f, None)
658
659
659 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
660 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
660 if exists is None:
661 if exists is None:
661 exists = os.path.lexists(os.path.join(self._root, path))
662 exists = os.path.lexists(os.path.join(self._root, path))
662 if not exists:
663 if not exists:
663 # Maybe a path component exists
664 # Maybe a path component exists
664 if not ignoremissing and '/' in path:
665 if not ignoremissing and '/' in path:
665 d, f = path.rsplit('/', 1)
666 d, f = path.rsplit('/', 1)
666 d = self._normalize(d, False, ignoremissing, None)
667 d = self._normalize(d, False, ignoremissing, None)
667 folded = d + "/" + f
668 folded = d + "/" + f
668 else:
669 else:
669 # No path components, preserve original case
670 # No path components, preserve original case
670 folded = path
671 folded = path
671 else:
672 else:
672 # recursively normalize leading directory components
673 # recursively normalize leading directory components
673 # against dirstate
674 # against dirstate
674 if '/' in normed:
675 if '/' in normed:
675 d, f = normed.rsplit('/', 1)
676 d, f = normed.rsplit('/', 1)
676 d = self._normalize(d, False, ignoremissing, True)
677 d = self._normalize(d, False, ignoremissing, True)
677 r = self._root + "/" + d
678 r = self._root + "/" + d
678 folded = d + "/" + util.fspath(f, r)
679 folded = d + "/" + util.fspath(f, r)
679 else:
680 else:
680 folded = util.fspath(normed, self._root)
681 folded = util.fspath(normed, self._root)
681 storemap[normed] = folded
682 storemap[normed] = folded
682
683
683 return folded
684 return folded
684
685
685 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
686 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
686 normed = util.normcase(path)
687 normed = util.normcase(path)
687 folded = self._filefoldmap.get(normed, None)
688 folded = self._filefoldmap.get(normed, None)
688 if folded is None:
689 if folded is None:
689 if isknown:
690 if isknown:
690 folded = path
691 folded = path
691 else:
692 else:
692 folded = self._discoverpath(path, normed, ignoremissing, exists,
693 folded = self._discoverpath(path, normed, ignoremissing, exists,
693 self._filefoldmap)
694 self._filefoldmap)
694 return folded
695 return folded
695
696
696 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
697 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
697 normed = util.normcase(path)
698 normed = util.normcase(path)
698 folded = self._filefoldmap.get(normed, None)
699 folded = self._filefoldmap.get(normed, None)
699 if folded is None:
700 if folded is None:
700 folded = self._dirfoldmap.get(normed, None)
701 folded = self._dirfoldmap.get(normed, None)
701 if folded is None:
702 if folded is None:
702 if isknown:
703 if isknown:
703 folded = path
704 folded = path
704 else:
705 else:
705 # store discovered result in dirfoldmap so that future
706 # store discovered result in dirfoldmap so that future
706 # normalizefile calls don't start matching directories
707 # normalizefile calls don't start matching directories
707 folded = self._discoverpath(path, normed, ignoremissing, exists,
708 folded = self._discoverpath(path, normed, ignoremissing, exists,
708 self._dirfoldmap)
709 self._dirfoldmap)
709 return folded
710 return folded
710
711
711 def normalize(self, path, isknown=False, ignoremissing=False):
712 def normalize(self, path, isknown=False, ignoremissing=False):
712 '''
713 '''
713 normalize the case of a pathname when on a casefolding filesystem
714 normalize the case of a pathname when on a casefolding filesystem
714
715
715 isknown specifies whether the filename came from walking the
716 isknown specifies whether the filename came from walking the
716 disk, to avoid extra filesystem access.
717 disk, to avoid extra filesystem access.
717
718
718 If ignoremissing is True, missing path are returned
719 If ignoremissing is True, missing path are returned
719 unchanged. Otherwise, we try harder to normalize possibly
720 unchanged. Otherwise, we try harder to normalize possibly
720 existing path components.
721 existing path components.
721
722
722 The normalized case is determined based on the following precedence:
723 The normalized case is determined based on the following precedence:
723
724
724 - version of name already stored in the dirstate
725 - version of name already stored in the dirstate
725 - version of name stored on disk
726 - version of name stored on disk
726 - version provided via command arguments
727 - version provided via command arguments
727 '''
728 '''
728
729
729 if self._checkcase:
730 if self._checkcase:
730 return self._normalize(path, isknown, ignoremissing)
731 return self._normalize(path, isknown, ignoremissing)
731 return path
732 return path
732
733
733 def clear(self):
734 def clear(self):
734 self._map = {}
735 self._map = dirstatemap()
735 self._nonnormalset = set()
736 self._nonnormalset = set()
736 self._otherparentset = set()
737 self._otherparentset = set()
737 if "_dirs" in self.__dict__:
738 if "_dirs" in self.__dict__:
738 delattr(self, "_dirs")
739 delattr(self, "_dirs")
739 self._copymap = {}
740 self._copymap = {}
740 self._pl = [nullid, nullid]
741 self._pl = [nullid, nullid]
741 self._lastnormaltime = 0
742 self._lastnormaltime = 0
742 self._updatedfiles.clear()
743 self._updatedfiles.clear()
743 self._dirty = True
744 self._dirty = True
744
745
745 def rebuild(self, parent, allfiles, changedfiles=None):
746 def rebuild(self, parent, allfiles, changedfiles=None):
746 if changedfiles is None:
747 if changedfiles is None:
747 # Rebuild entire dirstate
748 # Rebuild entire dirstate
748 changedfiles = allfiles
749 changedfiles = allfiles
749 lastnormaltime = self._lastnormaltime
750 lastnormaltime = self._lastnormaltime
750 self.clear()
751 self.clear()
751 self._lastnormaltime = lastnormaltime
752 self._lastnormaltime = lastnormaltime
752
753
753 if self._origpl is None:
754 if self._origpl is None:
754 self._origpl = self._pl
755 self._origpl = self._pl
755 self._pl = (parent, nullid)
756 self._pl = (parent, nullid)
756 for f in changedfiles:
757 for f in changedfiles:
757 if f in allfiles:
758 if f in allfiles:
758 self.normallookup(f)
759 self.normallookup(f)
759 else:
760 else:
760 self.drop(f)
761 self.drop(f)
761
762
762 self._dirty = True
763 self._dirty = True
763
764
764 def identity(self):
765 def identity(self):
765 '''Return identity of dirstate itself to detect changing in storage
766 '''Return identity of dirstate itself to detect changing in storage
766
767
767 If identity of previous dirstate is equal to this, writing
768 If identity of previous dirstate is equal to this, writing
768 changes based on the former dirstate out can keep consistency.
769 changes based on the former dirstate out can keep consistency.
769 '''
770 '''
770 return self._identity
771 return self._identity
771
772
772 def write(self, tr):
773 def write(self, tr):
773 if not self._dirty:
774 if not self._dirty:
774 return
775 return
775
776
776 filename = self._filename
777 filename = self._filename
777 if tr:
778 if tr:
778 # 'dirstate.write()' is not only for writing in-memory
779 # 'dirstate.write()' is not only for writing in-memory
779 # changes out, but also for dropping ambiguous timestamp.
780 # changes out, but also for dropping ambiguous timestamp.
780 # delayed writing re-raise "ambiguous timestamp issue".
781 # delayed writing re-raise "ambiguous timestamp issue".
781 # See also the wiki page below for detail:
782 # See also the wiki page below for detail:
782 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
783 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
783
784
784 # emulate dropping timestamp in 'parsers.pack_dirstate'
785 # emulate dropping timestamp in 'parsers.pack_dirstate'
785 now = _getfsnow(self._opener)
786 now = _getfsnow(self._opener)
786 dmap = self._map
787 dmap = self._map
787 for f in self._updatedfiles:
788 for f in self._updatedfiles:
788 e = dmap.get(f)
789 e = dmap.get(f)
789 if e is not None and e[0] == 'n' and e[3] == now:
790 if e is not None and e[0] == 'n' and e[3] == now:
790 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
791 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
791 self._nonnormalset.add(f)
792 self._nonnormalset.add(f)
792
793
793 # emulate that all 'dirstate.normal' results are written out
794 # emulate that all 'dirstate.normal' results are written out
794 self._lastnormaltime = 0
795 self._lastnormaltime = 0
795 self._updatedfiles.clear()
796 self._updatedfiles.clear()
796
797
797 # delay writing in-memory changes out
798 # delay writing in-memory changes out
798 tr.addfilegenerator('dirstate', (self._filename,),
799 tr.addfilegenerator('dirstate', (self._filename,),
799 self._writedirstate, location='plain')
800 self._writedirstate, location='plain')
800 return
801 return
801
802
802 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
803 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
803 self._writedirstate(st)
804 self._writedirstate(st)
804
805
805 def addparentchangecallback(self, category, callback):
806 def addparentchangecallback(self, category, callback):
806 """add a callback to be called when the wd parents are changed
807 """add a callback to be called when the wd parents are changed
807
808
808 Callback will be called with the following arguments:
809 Callback will be called with the following arguments:
809 dirstate, (oldp1, oldp2), (newp1, newp2)
810 dirstate, (oldp1, oldp2), (newp1, newp2)
810
811
811 Category is a unique identifier to allow overwriting an old callback
812 Category is a unique identifier to allow overwriting an old callback
812 with a newer callback.
813 with a newer callback.
813 """
814 """
814 self._plchangecallbacks[category] = callback
815 self._plchangecallbacks[category] = callback
815
816
816 def _writedirstate(self, st):
817 def _writedirstate(self, st):
817 # notify callbacks about parents change
818 # notify callbacks about parents change
818 if self._origpl is not None and self._origpl != self._pl:
819 if self._origpl is not None and self._origpl != self._pl:
819 for c, callback in sorted(self._plchangecallbacks.iteritems()):
820 for c, callback in sorted(self._plchangecallbacks.iteritems()):
820 callback(self, self._origpl, self._pl)
821 callback(self, self._origpl, self._pl)
821 self._origpl = None
822 self._origpl = None
822 # use the modification time of the newly created temporary file as the
823 # use the modification time of the newly created temporary file as the
823 # filesystem's notion of 'now'
824 # filesystem's notion of 'now'
824 now = util.fstat(st).st_mtime & _rangemask
825 now = util.fstat(st).st_mtime & _rangemask
825
826
826 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
827 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
827 # timestamp of each entries in dirstate, because of 'now > mtime'
828 # timestamp of each entries in dirstate, because of 'now > mtime'
828 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
829 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
829 if delaywrite > 0:
830 if delaywrite > 0:
830 # do we have any files to delay for?
831 # do we have any files to delay for?
831 for f, e in self._map.iteritems():
832 for f, e in self._map.iteritems():
832 if e[0] == 'n' and e[3] == now:
833 if e[0] == 'n' and e[3] == now:
833 import time # to avoid useless import
834 import time # to avoid useless import
834 # rather than sleep n seconds, sleep until the next
835 # rather than sleep n seconds, sleep until the next
835 # multiple of n seconds
836 # multiple of n seconds
836 clock = time.time()
837 clock = time.time()
837 start = int(clock) - (int(clock) % delaywrite)
838 start = int(clock) - (int(clock) % delaywrite)
838 end = start + delaywrite
839 end = start + delaywrite
839 time.sleep(end - clock)
840 time.sleep(end - clock)
840 now = end # trust our estimate that the end is near now
841 now = end # trust our estimate that the end is near now
841 break
842 break
842
843
843 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
844 st.write(parsers.pack_dirstate(self._map._map, self._copymap, self._pl,
845 now))
844 self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
846 self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
845 st.close()
847 st.close()
846 self._lastnormaltime = 0
848 self._lastnormaltime = 0
847 self._dirty = self._dirtypl = False
849 self._dirty = self._dirtypl = False
848
850
849 def _dirignore(self, f):
851 def _dirignore(self, f):
850 if f == '.':
852 if f == '.':
851 return False
853 return False
852 if self._ignore(f):
854 if self._ignore(f):
853 return True
855 return True
854 for p in util.finddirs(f):
856 for p in util.finddirs(f):
855 if self._ignore(p):
857 if self._ignore(p):
856 return True
858 return True
857 return False
859 return False
858
860
859 def _ignorefiles(self):
861 def _ignorefiles(self):
860 files = []
862 files = []
861 if os.path.exists(self._join('.hgignore')):
863 if os.path.exists(self._join('.hgignore')):
862 files.append(self._join('.hgignore'))
864 files.append(self._join('.hgignore'))
863 for name, path in self._ui.configitems("ui"):
865 for name, path in self._ui.configitems("ui"):
864 if name == 'ignore' or name.startswith('ignore.'):
866 if name == 'ignore' or name.startswith('ignore.'):
865 # we need to use os.path.join here rather than self._join
867 # we need to use os.path.join here rather than self._join
866 # because path is arbitrary and user-specified
868 # because path is arbitrary and user-specified
867 files.append(os.path.join(self._rootdir, util.expandpath(path)))
869 files.append(os.path.join(self._rootdir, util.expandpath(path)))
868 return files
870 return files
869
871
870 def _ignorefileandline(self, f):
872 def _ignorefileandline(self, f):
871 files = collections.deque(self._ignorefiles())
873 files = collections.deque(self._ignorefiles())
872 visited = set()
874 visited = set()
873 while files:
875 while files:
874 i = files.popleft()
876 i = files.popleft()
875 patterns = matchmod.readpatternfile(i, self._ui.warn,
877 patterns = matchmod.readpatternfile(i, self._ui.warn,
876 sourceinfo=True)
878 sourceinfo=True)
877 for pattern, lineno, line in patterns:
879 for pattern, lineno, line in patterns:
878 kind, p = matchmod._patsplit(pattern, 'glob')
880 kind, p = matchmod._patsplit(pattern, 'glob')
879 if kind == "subinclude":
881 if kind == "subinclude":
880 if p not in visited:
882 if p not in visited:
881 files.append(p)
883 files.append(p)
882 continue
884 continue
883 m = matchmod.match(self._root, '', [], [pattern],
885 m = matchmod.match(self._root, '', [], [pattern],
884 warn=self._ui.warn)
886 warn=self._ui.warn)
885 if m(f):
887 if m(f):
886 return (i, lineno, line)
888 return (i, lineno, line)
887 visited.add(i)
889 visited.add(i)
888 return (None, -1, "")
890 return (None, -1, "")
889
891
890 def _walkexplicit(self, match, subrepos):
892 def _walkexplicit(self, match, subrepos):
891 '''Get stat data about the files explicitly specified by match.
893 '''Get stat data about the files explicitly specified by match.
892
894
893 Return a triple (results, dirsfound, dirsnotfound).
895 Return a triple (results, dirsfound, dirsnotfound).
894 - results is a mapping from filename to stat result. It also contains
896 - results is a mapping from filename to stat result. It also contains
895 listings mapping subrepos and .hg to None.
897 listings mapping subrepos and .hg to None.
896 - dirsfound is a list of files found to be directories.
898 - dirsfound is a list of files found to be directories.
897 - dirsnotfound is a list of files that the dirstate thinks are
899 - dirsnotfound is a list of files that the dirstate thinks are
898 directories and that were not found.'''
900 directories and that were not found.'''
899
901
900 def badtype(mode):
902 def badtype(mode):
901 kind = _('unknown')
903 kind = _('unknown')
902 if stat.S_ISCHR(mode):
904 if stat.S_ISCHR(mode):
903 kind = _('character device')
905 kind = _('character device')
904 elif stat.S_ISBLK(mode):
906 elif stat.S_ISBLK(mode):
905 kind = _('block device')
907 kind = _('block device')
906 elif stat.S_ISFIFO(mode):
908 elif stat.S_ISFIFO(mode):
907 kind = _('fifo')
909 kind = _('fifo')
908 elif stat.S_ISSOCK(mode):
910 elif stat.S_ISSOCK(mode):
909 kind = _('socket')
911 kind = _('socket')
910 elif stat.S_ISDIR(mode):
912 elif stat.S_ISDIR(mode):
911 kind = _('directory')
913 kind = _('directory')
912 return _('unsupported file type (type is %s)') % kind
914 return _('unsupported file type (type is %s)') % kind
913
915
914 matchedir = match.explicitdir
916 matchedir = match.explicitdir
915 badfn = match.bad
917 badfn = match.bad
916 dmap = self._map
918 dmap = self._map
917 lstat = os.lstat
919 lstat = os.lstat
918 getkind = stat.S_IFMT
920 getkind = stat.S_IFMT
919 dirkind = stat.S_IFDIR
921 dirkind = stat.S_IFDIR
920 regkind = stat.S_IFREG
922 regkind = stat.S_IFREG
921 lnkkind = stat.S_IFLNK
923 lnkkind = stat.S_IFLNK
922 join = self._join
924 join = self._join
923 dirsfound = []
925 dirsfound = []
924 foundadd = dirsfound.append
926 foundadd = dirsfound.append
925 dirsnotfound = []
927 dirsnotfound = []
926 notfoundadd = dirsnotfound.append
928 notfoundadd = dirsnotfound.append
927
929
928 if not match.isexact() and self._checkcase:
930 if not match.isexact() and self._checkcase:
929 normalize = self._normalize
931 normalize = self._normalize
930 else:
932 else:
931 normalize = None
933 normalize = None
932
934
933 files = sorted(match.files())
935 files = sorted(match.files())
934 subrepos.sort()
936 subrepos.sort()
935 i, j = 0, 0
937 i, j = 0, 0
936 while i < len(files) and j < len(subrepos):
938 while i < len(files) and j < len(subrepos):
937 subpath = subrepos[j] + "/"
939 subpath = subrepos[j] + "/"
938 if files[i] < subpath:
940 if files[i] < subpath:
939 i += 1
941 i += 1
940 continue
942 continue
941 while i < len(files) and files[i].startswith(subpath):
943 while i < len(files) and files[i].startswith(subpath):
942 del files[i]
944 del files[i]
943 j += 1
945 j += 1
944
946
945 if not files or '.' in files:
947 if not files or '.' in files:
946 files = ['.']
948 files = ['.']
947 results = dict.fromkeys(subrepos)
949 results = dict.fromkeys(subrepos)
948 results['.hg'] = None
950 results['.hg'] = None
949
951
950 alldirs = None
952 alldirs = None
951 for ff in files:
953 for ff in files:
952 # constructing the foldmap is expensive, so don't do it for the
954 # constructing the foldmap is expensive, so don't do it for the
953 # common case where files is ['.']
955 # common case where files is ['.']
954 if normalize and ff != '.':
956 if normalize and ff != '.':
955 nf = normalize(ff, False, True)
957 nf = normalize(ff, False, True)
956 else:
958 else:
957 nf = ff
959 nf = ff
958 if nf in results:
960 if nf in results:
959 continue
961 continue
960
962
961 try:
963 try:
962 st = lstat(join(nf))
964 st = lstat(join(nf))
963 kind = getkind(st.st_mode)
965 kind = getkind(st.st_mode)
964 if kind == dirkind:
966 if kind == dirkind:
965 if nf in dmap:
967 if nf in dmap:
966 # file replaced by dir on disk but still in dirstate
968 # file replaced by dir on disk but still in dirstate
967 results[nf] = None
969 results[nf] = None
968 if matchedir:
970 if matchedir:
969 matchedir(nf)
971 matchedir(nf)
970 foundadd((nf, ff))
972 foundadd((nf, ff))
971 elif kind == regkind or kind == lnkkind:
973 elif kind == regkind or kind == lnkkind:
972 results[nf] = st
974 results[nf] = st
973 else:
975 else:
974 badfn(ff, badtype(kind))
976 badfn(ff, badtype(kind))
975 if nf in dmap:
977 if nf in dmap:
976 results[nf] = None
978 results[nf] = None
977 except OSError as inst: # nf not found on disk - it is dirstate only
979 except OSError as inst: # nf not found on disk - it is dirstate only
978 if nf in dmap: # does it exactly match a missing file?
980 if nf in dmap: # does it exactly match a missing file?
979 results[nf] = None
981 results[nf] = None
980 else: # does it match a missing directory?
982 else: # does it match a missing directory?
981 if alldirs is None:
983 if alldirs is None:
982 alldirs = util.dirs(dmap)
984 alldirs = util.dirs(dmap._map)
983 if nf in alldirs:
985 if nf in alldirs:
984 if matchedir:
986 if matchedir:
985 matchedir(nf)
987 matchedir(nf)
986 notfoundadd(nf)
988 notfoundadd(nf)
987 else:
989 else:
988 badfn(ff, encoding.strtolocal(inst.strerror))
990 badfn(ff, encoding.strtolocal(inst.strerror))
989
991
990 # Case insensitive filesystems cannot rely on lstat() failing to detect
992 # Case insensitive filesystems cannot rely on lstat() failing to detect
991 # a case-only rename. Prune the stat object for any file that does not
993 # a case-only rename. Prune the stat object for any file that does not
992 # match the case in the filesystem, if there are multiple files that
994 # match the case in the filesystem, if there are multiple files that
993 # normalize to the same path.
995 # normalize to the same path.
994 if match.isexact() and self._checkcase:
996 if match.isexact() and self._checkcase:
995 normed = {}
997 normed = {}
996
998
997 for f, st in results.iteritems():
999 for f, st in results.iteritems():
998 if st is None:
1000 if st is None:
999 continue
1001 continue
1000
1002
1001 nc = util.normcase(f)
1003 nc = util.normcase(f)
1002 paths = normed.get(nc)
1004 paths = normed.get(nc)
1003
1005
1004 if paths is None:
1006 if paths is None:
1005 paths = set()
1007 paths = set()
1006 normed[nc] = paths
1008 normed[nc] = paths
1007
1009
1008 paths.add(f)
1010 paths.add(f)
1009
1011
1010 for norm, paths in normed.iteritems():
1012 for norm, paths in normed.iteritems():
1011 if len(paths) > 1:
1013 if len(paths) > 1:
1012 for path in paths:
1014 for path in paths:
1013 folded = self._discoverpath(path, norm, True, None,
1015 folded = self._discoverpath(path, norm, True, None,
1014 self._dirfoldmap)
1016 self._dirfoldmap)
1015 if path != folded:
1017 if path != folded:
1016 results[path] = None
1018 results[path] = None
1017
1019
1018 return results, dirsfound, dirsnotfound
1020 return results, dirsfound, dirsnotfound
1019
1021
1020 def walk(self, match, subrepos, unknown, ignored, full=True):
1022 def walk(self, match, subrepos, unknown, ignored, full=True):
1021 '''
1023 '''
1022 Walk recursively through the directory tree, finding all files
1024 Walk recursively through the directory tree, finding all files
1023 matched by match.
1025 matched by match.
1024
1026
1025 If full is False, maybe skip some known-clean files.
1027 If full is False, maybe skip some known-clean files.
1026
1028
1027 Return a dict mapping filename to stat-like object (either
1029 Return a dict mapping filename to stat-like object (either
1028 mercurial.osutil.stat instance or return value of os.stat()).
1030 mercurial.osutil.stat instance or return value of os.stat()).
1029
1031
1030 '''
1032 '''
1031 # full is a flag that extensions that hook into walk can use -- this
1033 # full is a flag that extensions that hook into walk can use -- this
1032 # implementation doesn't use it at all. This satisfies the contract
1034 # implementation doesn't use it at all. This satisfies the contract
1033 # because we only guarantee a "maybe".
1035 # because we only guarantee a "maybe".
1034
1036
1035 if ignored:
1037 if ignored:
1036 ignore = util.never
1038 ignore = util.never
1037 dirignore = util.never
1039 dirignore = util.never
1038 elif unknown:
1040 elif unknown:
1039 ignore = self._ignore
1041 ignore = self._ignore
1040 dirignore = self._dirignore
1042 dirignore = self._dirignore
1041 else:
1043 else:
1042 # if not unknown and not ignored, drop dir recursion and step 2
1044 # if not unknown and not ignored, drop dir recursion and step 2
1043 ignore = util.always
1045 ignore = util.always
1044 dirignore = util.always
1046 dirignore = util.always
1045
1047
1046 matchfn = match.matchfn
1048 matchfn = match.matchfn
1047 matchalways = match.always()
1049 matchalways = match.always()
1048 matchtdir = match.traversedir
1050 matchtdir = match.traversedir
1049 dmap = self._map
1051 dmap = self._map
1050 listdir = util.listdir
1052 listdir = util.listdir
1051 lstat = os.lstat
1053 lstat = os.lstat
1052 dirkind = stat.S_IFDIR
1054 dirkind = stat.S_IFDIR
1053 regkind = stat.S_IFREG
1055 regkind = stat.S_IFREG
1054 lnkkind = stat.S_IFLNK
1056 lnkkind = stat.S_IFLNK
1055 join = self._join
1057 join = self._join
1056
1058
1057 exact = skipstep3 = False
1059 exact = skipstep3 = False
1058 if match.isexact(): # match.exact
1060 if match.isexact(): # match.exact
1059 exact = True
1061 exact = True
1060 dirignore = util.always # skip step 2
1062 dirignore = util.always # skip step 2
1061 elif match.prefix(): # match.match, no patterns
1063 elif match.prefix(): # match.match, no patterns
1062 skipstep3 = True
1064 skipstep3 = True
1063
1065
1064 if not exact and self._checkcase:
1066 if not exact and self._checkcase:
1065 normalize = self._normalize
1067 normalize = self._normalize
1066 normalizefile = self._normalizefile
1068 normalizefile = self._normalizefile
1067 skipstep3 = False
1069 skipstep3 = False
1068 else:
1070 else:
1069 normalize = self._normalize
1071 normalize = self._normalize
1070 normalizefile = None
1072 normalizefile = None
1071
1073
1072 # step 1: find all explicit files
1074 # step 1: find all explicit files
1073 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1075 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1074
1076
1075 skipstep3 = skipstep3 and not (work or dirsnotfound)
1077 skipstep3 = skipstep3 and not (work or dirsnotfound)
1076 work = [d for d in work if not dirignore(d[0])]
1078 work = [d for d in work if not dirignore(d[0])]
1077
1079
1078 # step 2: visit subdirectories
1080 # step 2: visit subdirectories
1079 def traverse(work, alreadynormed):
1081 def traverse(work, alreadynormed):
1080 wadd = work.append
1082 wadd = work.append
1081 while work:
1083 while work:
1082 nd = work.pop()
1084 nd = work.pop()
1083 if not match.visitdir(nd):
1085 if not match.visitdir(nd):
1084 continue
1086 continue
1085 skip = None
1087 skip = None
1086 if nd == '.':
1088 if nd == '.':
1087 nd = ''
1089 nd = ''
1088 else:
1090 else:
1089 skip = '.hg'
1091 skip = '.hg'
1090 try:
1092 try:
1091 entries = listdir(join(nd), stat=True, skip=skip)
1093 entries = listdir(join(nd), stat=True, skip=skip)
1092 except OSError as inst:
1094 except OSError as inst:
1093 if inst.errno in (errno.EACCES, errno.ENOENT):
1095 if inst.errno in (errno.EACCES, errno.ENOENT):
1094 match.bad(self.pathto(nd),
1096 match.bad(self.pathto(nd),
1095 encoding.strtolocal(inst.strerror))
1097 encoding.strtolocal(inst.strerror))
1096 continue
1098 continue
1097 raise
1099 raise
1098 for f, kind, st in entries:
1100 for f, kind, st in entries:
1099 if normalizefile:
1101 if normalizefile:
1100 # even though f might be a directory, we're only
1102 # even though f might be a directory, we're only
1101 # interested in comparing it to files currently in the
1103 # interested in comparing it to files currently in the
1102 # dmap -- therefore normalizefile is enough
1104 # dmap -- therefore normalizefile is enough
1103 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1105 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1104 True)
1106 True)
1105 else:
1107 else:
1106 nf = nd and (nd + "/" + f) or f
1108 nf = nd and (nd + "/" + f) or f
1107 if nf not in results:
1109 if nf not in results:
1108 if kind == dirkind:
1110 if kind == dirkind:
1109 if not ignore(nf):
1111 if not ignore(nf):
1110 if matchtdir:
1112 if matchtdir:
1111 matchtdir(nf)
1113 matchtdir(nf)
1112 wadd(nf)
1114 wadd(nf)
1113 if nf in dmap and (matchalways or matchfn(nf)):
1115 if nf in dmap and (matchalways or matchfn(nf)):
1114 results[nf] = None
1116 results[nf] = None
1115 elif kind == regkind or kind == lnkkind:
1117 elif kind == regkind or kind == lnkkind:
1116 if nf in dmap:
1118 if nf in dmap:
1117 if matchalways or matchfn(nf):
1119 if matchalways or matchfn(nf):
1118 results[nf] = st
1120 results[nf] = st
1119 elif ((matchalways or matchfn(nf))
1121 elif ((matchalways or matchfn(nf))
1120 and not ignore(nf)):
1122 and not ignore(nf)):
1121 # unknown file -- normalize if necessary
1123 # unknown file -- normalize if necessary
1122 if not alreadynormed:
1124 if not alreadynormed:
1123 nf = normalize(nf, False, True)
1125 nf = normalize(nf, False, True)
1124 results[nf] = st
1126 results[nf] = st
1125 elif nf in dmap and (matchalways or matchfn(nf)):
1127 elif nf in dmap and (matchalways or matchfn(nf)):
1126 results[nf] = None
1128 results[nf] = None
1127
1129
1128 for nd, d in work:
1130 for nd, d in work:
1129 # alreadynormed means that processwork doesn't have to do any
1131 # alreadynormed means that processwork doesn't have to do any
1130 # expensive directory normalization
1132 # expensive directory normalization
1131 alreadynormed = not normalize or nd == d
1133 alreadynormed = not normalize or nd == d
1132 traverse([d], alreadynormed)
1134 traverse([d], alreadynormed)
1133
1135
1134 for s in subrepos:
1136 for s in subrepos:
1135 del results[s]
1137 del results[s]
1136 del results['.hg']
1138 del results['.hg']
1137
1139
1138 # step 3: visit remaining files from dmap
1140 # step 3: visit remaining files from dmap
1139 if not skipstep3 and not exact:
1141 if not skipstep3 and not exact:
1140 # If a dmap file is not in results yet, it was either
1142 # If a dmap file is not in results yet, it was either
1141 # a) not matching matchfn b) ignored, c) missing, or d) under a
1143 # a) not matching matchfn b) ignored, c) missing, or d) under a
1142 # symlink directory.
1144 # symlink directory.
1143 if not results and matchalways:
1145 if not results and matchalways:
1144 visit = [f for f in dmap]
1146 visit = [f for f in dmap]
1145 else:
1147 else:
1146 visit = [f for f in dmap if f not in results and matchfn(f)]
1148 visit = [f for f in dmap if f not in results and matchfn(f)]
1147 visit.sort()
1149 visit.sort()
1148
1150
1149 if unknown:
1151 if unknown:
1150 # unknown == True means we walked all dirs under the roots
1152 # unknown == True means we walked all dirs under the roots
1151 # that wasn't ignored, and everything that matched was stat'ed
1153 # that wasn't ignored, and everything that matched was stat'ed
1152 # and is already in results.
1154 # and is already in results.
1153 # The rest must thus be ignored or under a symlink.
1155 # The rest must thus be ignored or under a symlink.
1154 audit_path = pathutil.pathauditor(self._root, cached=True)
1156 audit_path = pathutil.pathauditor(self._root, cached=True)
1155
1157
1156 for nf in iter(visit):
1158 for nf in iter(visit):
1157 # If a stat for the same file was already added with a
1159 # If a stat for the same file was already added with a
1158 # different case, don't add one for this, since that would
1160 # different case, don't add one for this, since that would
1159 # make it appear as if the file exists under both names
1161 # make it appear as if the file exists under both names
1160 # on disk.
1162 # on disk.
1161 if (normalizefile and
1163 if (normalizefile and
1162 normalizefile(nf, True, True) in results):
1164 normalizefile(nf, True, True) in results):
1163 results[nf] = None
1165 results[nf] = None
1164 # Report ignored items in the dmap as long as they are not
1166 # Report ignored items in the dmap as long as they are not
1165 # under a symlink directory.
1167 # under a symlink directory.
1166 elif audit_path.check(nf):
1168 elif audit_path.check(nf):
1167 try:
1169 try:
1168 results[nf] = lstat(join(nf))
1170 results[nf] = lstat(join(nf))
1169 # file was just ignored, no links, and exists
1171 # file was just ignored, no links, and exists
1170 except OSError:
1172 except OSError:
1171 # file doesn't exist
1173 # file doesn't exist
1172 results[nf] = None
1174 results[nf] = None
1173 else:
1175 else:
1174 # It's either missing or under a symlink directory
1176 # It's either missing or under a symlink directory
1175 # which we in this case report as missing
1177 # which we in this case report as missing
1176 results[nf] = None
1178 results[nf] = None
1177 else:
1179 else:
1178 # We may not have walked the full directory tree above,
1180 # We may not have walked the full directory tree above,
1179 # so stat and check everything we missed.
1181 # so stat and check everything we missed.
1180 iv = iter(visit)
1182 iv = iter(visit)
1181 for st in util.statfiles([join(i) for i in visit]):
1183 for st in util.statfiles([join(i) for i in visit]):
1182 results[next(iv)] = st
1184 results[next(iv)] = st
1183 return results
1185 return results
1184
1186
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written

        match: matcher selecting which files to report on
        subrepos: subrepo paths to exclude from the walk results
        ignored/clean/unknown: booleans requesting those categories
        '''
        # Keep the caller's flags under distinct names; the original names
        # are reused below as result lists.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        # Bind frequently-used attributes/methods to locals: this loop runs
        # once per file in the working directory, so attribute lookups add up.
        dmap = self._map
        ladd = lookup.append            # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                # File exists on disk but is not tracked: it is either
                # ignored or unknown.  Note the asymmetry: an ignored file
                # is reported only when listignored is set (or the file was
                # named explicitly), otherwise it is silently skipped.
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            # st is None when the file is tracked but missing on disk:
            # report it as deleted for the 'n'(ormal)/'m'/'a' states.
            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                # Tracked and unmodified as far as the dirstate knows;
                # compare the recorded stat data against the on-disk stat.
                # size == -2 marks an entry coming from the other merge
                # parent; a copy source in copymap also forces "modified".
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    # Same size but different mtime: content may or may not
                    # have changed, so the caller must read the file ("unsure").
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                # merged: always report as modified
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))
1276
1278
1277 def matches(self, match):
1279 def matches(self, match):
1278 '''
1280 '''
1279 return files in the dirstate (in whatever state) filtered by match
1281 return files in the dirstate (in whatever state) filtered by match
1280 '''
1282 '''
1281 dmap = self._map
1283 dmap = self._map
1282 if match.always():
1284 if match.always():
1283 return dmap.keys()
1285 return dmap.keys()
1284 files = match.files()
1286 files = match.files()
1285 if match.isexact():
1287 if match.isexact():
1286 # fast path -- filter the other way around, since typically files is
1288 # fast path -- filter the other way around, since typically files is
1287 # much smaller than dmap
1289 # much smaller than dmap
1288 return [f for f in files if f in dmap]
1290 return [f for f in files if f in dmap]
1289 if match.prefix() and all(fn in dmap for fn in files):
1291 if match.prefix() and all(fn in dmap for fn in files):
1290 # fast path -- all the values are known to be files, so just return
1292 # fast path -- all the values are known to be files, so just return
1291 # that
1293 # that
1292 return list(files)
1294 return list(files)
1293 return [f for f in dmap if match(f)]
1295 return [f for f in dmap if match(f)]
1294
1296
1295 def _actualfilename(self, tr):
1297 def _actualfilename(self, tr):
1296 if tr:
1298 if tr:
1297 return self._pendingfilename
1299 return self._pendingfilename
1298 else:
1300 else:
1299 return self._filename
1301 return self._filename
1300
1302
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        tr: the running transaction, or None/falsy when there is none
        backupname: target name for the backup (relative to self._opener)
        '''
        filename = self._actualfilename(tr)
        # Backing a file up over itself would destroy it.
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(self._opener(filename, "w", atomictemp=True,
                                             checkambig=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        # Remove any stale backup before creating the new one.
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(self._opener.join(filename),
                      self._opener.join(backupname), hardlink=True)
1330
1332
1331 def restorebackup(self, tr, backupname):
1333 def restorebackup(self, tr, backupname):
1332 '''Restore dirstate by backup file'''
1334 '''Restore dirstate by backup file'''
1333 # this "invalidate()" prevents "wlock.release()" from writing
1335 # this "invalidate()" prevents "wlock.release()" from writing
1334 # changes of dirstate out after restoring from backup file
1336 # changes of dirstate out after restoring from backup file
1335 self.invalidate()
1337 self.invalidate()
1336 filename = self._actualfilename(tr)
1338 filename = self._actualfilename(tr)
1337 self._opener.rename(backupname, filename, checkambig=True)
1339 self._opener.rename(backupname, filename, checkambig=True)
1338
1340
1339 def clearbackup(self, tr, backupname):
1341 def clearbackup(self, tr, backupname):
1340 '''Clear backup file'''
1342 '''Clear backup file'''
1341 self._opener.unlink(backupname)
1343 self._opener.unlink(backupname)
1344
class dirstatemap(object):
    """Thin wrapper around a plain dict of dirstate entries.

    Exposes only the mapping operations the dirstate code uses, so the
    underlying storage can later be swapped out without touching callers.
    """

    def __init__(self):
        # filename -> dirstate entry
        self._map = {}

    # read access

    def __getitem__(self, key):
        return self._map[key]

    def get(self, key, default=None):
        return self._map.get(key, default)

    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        return iter(self._map)

    def iteritems(self):
        return self._map.iteritems()

    def keys(self):
        return self._map.keys()

    # write access

    def __setitem__(self, key, value):
        self._map[key] = value

    def __delitem__(self, key):
        del self._map[key]
General Comments 0
You need to be logged in to leave comments. Login now