##// END OF EJS Templates
dirstate: centralize _cwd handling into _cwd method...
FUJIWARA Katsunori -
r33212:b7f6885c default
parent child Browse files
Show More
@@ -1,1336 +1,1336
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18 from . import (
18 from . import (
19 encoding,
19 encoding,
20 error,
20 error,
21 match as matchmod,
21 match as matchmod,
22 pathutil,
22 pathutil,
23 policy,
23 policy,
24 pycompat,
24 pycompat,
25 scmutil,
25 scmutil,
26 txnutil,
26 txnutil,
27 util,
27 util,
28 )
28 )
29
29
30 parsers = policy.importmod(r'parsers')
30 parsers = policy.importmod(r'parsers')
31
31
32 propertycache = util.propertycache
32 propertycache = util.propertycache
33 filecache = scmutil.filecache
33 filecache = scmutil.filecache
34 _rangemask = 0x7fffffff
34 _rangemask = 0x7fffffff
35
35
36 dirstatetuple = parsers.dirstatetuple
36 dirstatetuple = parsers.dirstatetuple
37
37
class repocache(filecache):
    """A filecache whose tracked files live under the repository's .hg/."""
    def join(self, obj, fname):
        # Resolve fname through the dirstate's .hg/ opener.
        return obj._opener.join(fname)
42
42
class rootcache(filecache):
    """A filecache whose tracked files live in the working directory root."""
    def join(self, obj, fname):
        # Resolve fname relative to the repository root.
        return obj._join(fname)
47
47
48 def _getfsnow(vfs):
48 def _getfsnow(vfs):
49 '''Get "now" timestamp on filesystem'''
49 '''Get "now" timestamp on filesystem'''
50 tmpfd, tmpname = vfs.mkstemp()
50 tmpfd, tmpname = vfs.mkstemp()
51 try:
51 try:
52 return os.fstat(tmpfd).st_mtime
52 return os.fstat(tmpfd).st_mtime
53 finally:
53 finally:
54 os.close(tmpfd)
54 os.close(tmpfd)
55 vfs.unlink(tmpname)
55 vfs.unlink(tmpname)
56
56
def nonnormalentries(dmap):
    '''Compute the nonnormal dirstate entries from the dmap'''
    try:
        # Fast path: the C parsers module computes both sets in one pass.
        return parsers.nonnormalotherparententries(dmap)
    except AttributeError:
        # Pure-Python fallback. Entry layout: (state, mode, size, mtime).
        nonnorm = set(fname for fname, e in dmap.iteritems()
                      if e[0] != 'n' or e[3] == -1)
        otherparent = set(fname for fname, e in dmap.iteritems()
                          if e[0] == 'n' and e[2] == -2)
        return nonnorm, otherparent
70
70
71 class dirstate(object):
71 class dirstate(object):
72
72
73 def __init__(self, opener, ui, root, validate):
73 def __init__(self, opener, ui, root, validate):
74 '''Create a new dirstate object.
74 '''Create a new dirstate object.
75
75
76 opener is an open()-like callable that can be used to open the
76 opener is an open()-like callable that can be used to open the
77 dirstate file; root is the root of the directory tracked by
77 dirstate file; root is the root of the directory tracked by
78 the dirstate.
78 the dirstate.
79 '''
79 '''
80 self._opener = opener
80 self._opener = opener
81 self._validate = validate
81 self._validate = validate
82 self._root = root
82 self._root = root
83 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
83 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
84 # UNC path pointing to root share (issue4557)
84 # UNC path pointing to root share (issue4557)
85 self._rootdir = pathutil.normasprefix(root)
85 self._rootdir = pathutil.normasprefix(root)
86 # internal config: ui.forcecwd
87 forcecwd = ui.config('ui', 'forcecwd')
88 if forcecwd:
89 self._cwd = forcecwd
90 self._dirty = False
86 self._dirty = False
91 self._dirtypl = False
87 self._dirtypl = False
92 self._lastnormaltime = 0
88 self._lastnormaltime = 0
93 self._ui = ui
89 self._ui = ui
94 self._filecache = {}
90 self._filecache = {}
95 self._parentwriters = 0
91 self._parentwriters = 0
96 self._filename = 'dirstate'
92 self._filename = 'dirstate'
97 self._pendingfilename = '%s.pending' % self._filename
93 self._pendingfilename = '%s.pending' % self._filename
98 self._plchangecallbacks = {}
94 self._plchangecallbacks = {}
99 self._origpl = None
95 self._origpl = None
100 self._updatedfiles = set()
96 self._updatedfiles = set()
101
97
102 # for consistent view between _pl() and _read() invocations
98 # for consistent view between _pl() and _read() invocations
103 self._pendingmode = None
99 self._pendingmode = None
104
100
105 @contextlib.contextmanager
101 @contextlib.contextmanager
106 def parentchange(self):
102 def parentchange(self):
107 '''Context manager for handling dirstate parents.
103 '''Context manager for handling dirstate parents.
108
104
109 If an exception occurs in the scope of the context manager,
105 If an exception occurs in the scope of the context manager,
110 the incoherent dirstate won't be written when wlock is
106 the incoherent dirstate won't be written when wlock is
111 released.
107 released.
112 '''
108 '''
113 self._parentwriters += 1
109 self._parentwriters += 1
114 yield
110 yield
115 # Typically we want the "undo" step of a context manager in a
111 # Typically we want the "undo" step of a context manager in a
116 # finally block so it happens even when an exception
112 # finally block so it happens even when an exception
117 # occurs. In this case, however, we only want to decrement
113 # occurs. In this case, however, we only want to decrement
118 # parentwriters if the code in the with statement exits
114 # parentwriters if the code in the with statement exits
119 # normally, so we don't have a try/finally here on purpose.
115 # normally, so we don't have a try/finally here on purpose.
120 self._parentwriters -= 1
116 self._parentwriters -= 1
121
117
122 def beginparentchange(self):
118 def beginparentchange(self):
123 '''Marks the beginning of a set of changes that involve changing
119 '''Marks the beginning of a set of changes that involve changing
124 the dirstate parents. If there is an exception during this time,
120 the dirstate parents. If there is an exception during this time,
125 the dirstate will not be written when the wlock is released. This
121 the dirstate will not be written when the wlock is released. This
126 prevents writing an incoherent dirstate where the parent doesn't
122 prevents writing an incoherent dirstate where the parent doesn't
127 match the contents.
123 match the contents.
128 '''
124 '''
129 self._ui.deprecwarn('beginparentchange is obsoleted by the '
125 self._ui.deprecwarn('beginparentchange is obsoleted by the '
130 'parentchange context manager.', '4.3')
126 'parentchange context manager.', '4.3')
131 self._parentwriters += 1
127 self._parentwriters += 1
132
128
133 def endparentchange(self):
129 def endparentchange(self):
134 '''Marks the end of a set of changes that involve changing the
130 '''Marks the end of a set of changes that involve changing the
135 dirstate parents. Once all parent changes have been marked done,
131 dirstate parents. Once all parent changes have been marked done,
136 the wlock will be free to write the dirstate on release.
132 the wlock will be free to write the dirstate on release.
137 '''
133 '''
138 self._ui.deprecwarn('endparentchange is obsoleted by the '
134 self._ui.deprecwarn('endparentchange is obsoleted by the '
139 'parentchange context manager.', '4.3')
135 'parentchange context manager.', '4.3')
140 if self._parentwriters > 0:
136 if self._parentwriters > 0:
141 self._parentwriters -= 1
137 self._parentwriters -= 1
142
138
143 def pendingparentchange(self):
139 def pendingparentchange(self):
144 '''Returns true if the dirstate is in the middle of a set of changes
140 '''Returns true if the dirstate is in the middle of a set of changes
145 that modify the dirstate parent.
141 that modify the dirstate parent.
146 '''
142 '''
147 return self._parentwriters > 0
143 return self._parentwriters > 0
148
144
149 @propertycache
145 @propertycache
150 def _map(self):
146 def _map(self):
151 '''Return the dirstate contents as a map from filename to
147 '''Return the dirstate contents as a map from filename to
152 (state, mode, size, time).'''
148 (state, mode, size, time).'''
153 self._read()
149 self._read()
154 return self._map
150 return self._map
155
151
156 @propertycache
152 @propertycache
157 def _copymap(self):
153 def _copymap(self):
158 self._read()
154 self._read()
159 return self._copymap
155 return self._copymap
160
156
161 @propertycache
157 @propertycache
162 def _identity(self):
158 def _identity(self):
163 self._read()
159 self._read()
164 return self._identity
160 return self._identity
165
161
166 @propertycache
162 @propertycache
167 def _nonnormalset(self):
163 def _nonnormalset(self):
168 nonnorm, otherparents = nonnormalentries(self._map)
164 nonnorm, otherparents = nonnormalentries(self._map)
169 self._otherparentset = otherparents
165 self._otherparentset = otherparents
170 return nonnorm
166 return nonnorm
171
167
172 @propertycache
168 @propertycache
173 def _otherparentset(self):
169 def _otherparentset(self):
174 nonnorm, otherparents = nonnormalentries(self._map)
170 nonnorm, otherparents = nonnormalentries(self._map)
175 self._nonnormalset = nonnorm
171 self._nonnormalset = nonnorm
176 return otherparents
172 return otherparents
177
173
178 @propertycache
174 @propertycache
179 def _filefoldmap(self):
175 def _filefoldmap(self):
180 try:
176 try:
181 makefilefoldmap = parsers.make_file_foldmap
177 makefilefoldmap = parsers.make_file_foldmap
182 except AttributeError:
178 except AttributeError:
183 pass
179 pass
184 else:
180 else:
185 return makefilefoldmap(self._map, util.normcasespec,
181 return makefilefoldmap(self._map, util.normcasespec,
186 util.normcasefallback)
182 util.normcasefallback)
187
183
188 f = {}
184 f = {}
189 normcase = util.normcase
185 normcase = util.normcase
190 for name, s in self._map.iteritems():
186 for name, s in self._map.iteritems():
191 if s[0] != 'r':
187 if s[0] != 'r':
192 f[normcase(name)] = name
188 f[normcase(name)] = name
193 f['.'] = '.' # prevents useless util.fspath() invocation
189 f['.'] = '.' # prevents useless util.fspath() invocation
194 return f
190 return f
195
191
196 @propertycache
192 @propertycache
197 def _dirfoldmap(self):
193 def _dirfoldmap(self):
198 f = {}
194 f = {}
199 normcase = util.normcase
195 normcase = util.normcase
200 for name in self._dirs:
196 for name in self._dirs:
201 f[normcase(name)] = name
197 f[normcase(name)] = name
202 return f
198 return f
203
199
204 @repocache('branch')
200 @repocache('branch')
205 def _branch(self):
201 def _branch(self):
206 try:
202 try:
207 return self._opener.read("branch").strip() or "default"
203 return self._opener.read("branch").strip() or "default"
208 except IOError as inst:
204 except IOError as inst:
209 if inst.errno != errno.ENOENT:
205 if inst.errno != errno.ENOENT:
210 raise
206 raise
211 return "default"
207 return "default"
212
208
213 @propertycache
209 @propertycache
214 def _pl(self):
210 def _pl(self):
215 try:
211 try:
216 fp = self._opendirstatefile()
212 fp = self._opendirstatefile()
217 st = fp.read(40)
213 st = fp.read(40)
218 fp.close()
214 fp.close()
219 l = len(st)
215 l = len(st)
220 if l == 40:
216 if l == 40:
221 return st[:20], st[20:40]
217 return st[:20], st[20:40]
222 elif l > 0 and l < 40:
218 elif l > 0 and l < 40:
223 raise error.Abort(_('working directory state appears damaged!'))
219 raise error.Abort(_('working directory state appears damaged!'))
224 except IOError as err:
220 except IOError as err:
225 if err.errno != errno.ENOENT:
221 if err.errno != errno.ENOENT:
226 raise
222 raise
227 return [nullid, nullid]
223 return [nullid, nullid]
228
224
229 @propertycache
225 @propertycache
230 def _dirs(self):
226 def _dirs(self):
231 return util.dirs(self._map, 'r')
227 return util.dirs(self._map, 'r')
232
228
233 def dirs(self):
229 def dirs(self):
234 return self._dirs
230 return self._dirs
235
231
236 @rootcache('.hgignore')
232 @rootcache('.hgignore')
237 def _ignore(self):
233 def _ignore(self):
238 files = self._ignorefiles()
234 files = self._ignorefiles()
239 if not files:
235 if not files:
240 return matchmod.never(self._root, '')
236 return matchmod.never(self._root, '')
241
237
242 pats = ['include:%s' % f for f in files]
238 pats = ['include:%s' % f for f in files]
243 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
239 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
244
240
245 @propertycache
241 @propertycache
246 def _slash(self):
242 def _slash(self):
247 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
243 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
248
244
249 @propertycache
245 @propertycache
250 def _checklink(self):
246 def _checklink(self):
251 return util.checklink(self._root)
247 return util.checklink(self._root)
252
248
253 @propertycache
249 @propertycache
254 def _checkexec(self):
250 def _checkexec(self):
255 return util.checkexec(self._root)
251 return util.checkexec(self._root)
256
252
257 @propertycache
253 @propertycache
258 def _checkcase(self):
254 def _checkcase(self):
259 return not util.fscasesensitive(self._join('.hg'))
255 return not util.fscasesensitive(self._join('.hg'))
260
256
261 def _join(self, f):
257 def _join(self, f):
262 # much faster than os.path.join()
258 # much faster than os.path.join()
263 # it's safe because f is always a relative path
259 # it's safe because f is always a relative path
264 return self._rootdir + f
260 return self._rootdir + f
265
261
266 def flagfunc(self, buildfallback):
262 def flagfunc(self, buildfallback):
267 if self._checklink and self._checkexec:
263 if self._checklink and self._checkexec:
268 def f(x):
264 def f(x):
269 try:
265 try:
270 st = os.lstat(self._join(x))
266 st = os.lstat(self._join(x))
271 if util.statislink(st):
267 if util.statislink(st):
272 return 'l'
268 return 'l'
273 if util.statisexec(st):
269 if util.statisexec(st):
274 return 'x'
270 return 'x'
275 except OSError:
271 except OSError:
276 pass
272 pass
277 return ''
273 return ''
278 return f
274 return f
279
275
280 fallback = buildfallback()
276 fallback = buildfallback()
281 if self._checklink:
277 if self._checklink:
282 def f(x):
278 def f(x):
283 if os.path.islink(self._join(x)):
279 if os.path.islink(self._join(x)):
284 return 'l'
280 return 'l'
285 if 'x' in fallback(x):
281 if 'x' in fallback(x):
286 return 'x'
282 return 'x'
287 return ''
283 return ''
288 return f
284 return f
289 if self._checkexec:
285 if self._checkexec:
290 def f(x):
286 def f(x):
291 if 'l' in fallback(x):
287 if 'l' in fallback(x):
292 return 'l'
288 return 'l'
293 if util.isexec(self._join(x)):
289 if util.isexec(self._join(x)):
294 return 'x'
290 return 'x'
295 return ''
291 return ''
296 return f
292 return f
297 else:
293 else:
298 return fallback
294 return fallback
299
295
300 @propertycache
296 @propertycache
301 def _cwd(self):
297 def _cwd(self):
298 # internal config: ui.forcecwd
299 forcecwd = self._ui.config('ui', 'forcecwd')
300 if forcecwd:
301 return forcecwd
302 return pycompat.getcwd()
302 return pycompat.getcwd()
303
303
304 def getcwd(self):
304 def getcwd(self):
305 '''Return the path from which a canonical path is calculated.
305 '''Return the path from which a canonical path is calculated.
306
306
307 This path should be used to resolve file patterns or to convert
307 This path should be used to resolve file patterns or to convert
308 canonical paths back to file paths for display. It shouldn't be
308 canonical paths back to file paths for display. It shouldn't be
309 used to get real file paths. Use vfs functions instead.
309 used to get real file paths. Use vfs functions instead.
310 '''
310 '''
311 cwd = self._cwd
311 cwd = self._cwd
312 if cwd == self._root:
312 if cwd == self._root:
313 return ''
313 return ''
314 # self._root ends with a path separator if self._root is '/' or 'C:\'
314 # self._root ends with a path separator if self._root is '/' or 'C:\'
315 rootsep = self._root
315 rootsep = self._root
316 if not util.endswithsep(rootsep):
316 if not util.endswithsep(rootsep):
317 rootsep += pycompat.ossep
317 rootsep += pycompat.ossep
318 if cwd.startswith(rootsep):
318 if cwd.startswith(rootsep):
319 return cwd[len(rootsep):]
319 return cwd[len(rootsep):]
320 else:
320 else:
321 # we're outside the repo. return an absolute path.
321 # we're outside the repo. return an absolute path.
322 return cwd
322 return cwd
323
323
324 def pathto(self, f, cwd=None):
324 def pathto(self, f, cwd=None):
325 if cwd is None:
325 if cwd is None:
326 cwd = self.getcwd()
326 cwd = self.getcwd()
327 path = util.pathto(self._root, cwd, f)
327 path = util.pathto(self._root, cwd, f)
328 if self._slash:
328 if self._slash:
329 return util.pconvert(path)
329 return util.pconvert(path)
330 return path
330 return path
331
331
332 def __getitem__(self, key):
332 def __getitem__(self, key):
333 '''Return the current state of key (a filename) in the dirstate.
333 '''Return the current state of key (a filename) in the dirstate.
334
334
335 States are:
335 States are:
336 n normal
336 n normal
337 m needs merging
337 m needs merging
338 r marked for removal
338 r marked for removal
339 a marked for addition
339 a marked for addition
340 ? not tracked
340 ? not tracked
341 '''
341 '''
342 return self._map.get(key, ("?",))[0]
342 return self._map.get(key, ("?",))[0]
343
343
344 def __contains__(self, key):
344 def __contains__(self, key):
345 return key in self._map
345 return key in self._map
346
346
347 def __iter__(self):
347 def __iter__(self):
348 for x in sorted(self._map):
348 for x in sorted(self._map):
349 yield x
349 yield x
350
350
351 def items(self):
351 def items(self):
352 return self._map.iteritems()
352 return self._map.iteritems()
353
353
354 iteritems = items
354 iteritems = items
355
355
356 def parents(self):
356 def parents(self):
357 return [self._validate(p) for p in self._pl]
357 return [self._validate(p) for p in self._pl]
358
358
359 def p1(self):
359 def p1(self):
360 return self._validate(self._pl[0])
360 return self._validate(self._pl[0])
361
361
362 def p2(self):
362 def p2(self):
363 return self._validate(self._pl[1])
363 return self._validate(self._pl[1])
364
364
365 def branch(self):
365 def branch(self):
366 return encoding.tolocal(self._branch)
366 return encoding.tolocal(self._branch)
367
367
368 def setparents(self, p1, p2=nullid):
368 def setparents(self, p1, p2=nullid):
369 """Set dirstate parents to p1 and p2.
369 """Set dirstate parents to p1 and p2.
370
370
371 When moving from two parents to one, 'm' merged entries a
371 When moving from two parents to one, 'm' merged entries a
372 adjusted to normal and previous copy records discarded and
372 adjusted to normal and previous copy records discarded and
373 returned by the call.
373 returned by the call.
374
374
375 See localrepo.setparents()
375 See localrepo.setparents()
376 """
376 """
377 if self._parentwriters == 0:
377 if self._parentwriters == 0:
378 raise ValueError("cannot set dirstate parent without "
378 raise ValueError("cannot set dirstate parent without "
379 "calling dirstate.beginparentchange")
379 "calling dirstate.beginparentchange")
380
380
381 self._dirty = self._dirtypl = True
381 self._dirty = self._dirtypl = True
382 oldp2 = self._pl[1]
382 oldp2 = self._pl[1]
383 if self._origpl is None:
383 if self._origpl is None:
384 self._origpl = self._pl
384 self._origpl = self._pl
385 self._pl = p1, p2
385 self._pl = p1, p2
386 copies = {}
386 copies = {}
387 if oldp2 != nullid and p2 == nullid:
387 if oldp2 != nullid and p2 == nullid:
388 candidatefiles = self._nonnormalset.union(self._otherparentset)
388 candidatefiles = self._nonnormalset.union(self._otherparentset)
389 for f in candidatefiles:
389 for f in candidatefiles:
390 s = self._map.get(f)
390 s = self._map.get(f)
391 if s is None:
391 if s is None:
392 continue
392 continue
393
393
394 # Discard 'm' markers when moving away from a merge state
394 # Discard 'm' markers when moving away from a merge state
395 if s[0] == 'm':
395 if s[0] == 'm':
396 if f in self._copymap:
396 if f in self._copymap:
397 copies[f] = self._copymap[f]
397 copies[f] = self._copymap[f]
398 self.normallookup(f)
398 self.normallookup(f)
399 # Also fix up otherparent markers
399 # Also fix up otherparent markers
400 elif s[0] == 'n' and s[2] == -2:
400 elif s[0] == 'n' and s[2] == -2:
401 if f in self._copymap:
401 if f in self._copymap:
402 copies[f] = self._copymap[f]
402 copies[f] = self._copymap[f]
403 self.add(f)
403 self.add(f)
404 return copies
404 return copies
405
405
406 def setbranch(self, branch):
406 def setbranch(self, branch):
407 self._branch = encoding.fromlocal(branch)
407 self._branch = encoding.fromlocal(branch)
408 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
408 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
409 try:
409 try:
410 f.write(self._branch + '\n')
410 f.write(self._branch + '\n')
411 f.close()
411 f.close()
412
412
413 # make sure filecache has the correct stat info for _branch after
413 # make sure filecache has the correct stat info for _branch after
414 # replacing the underlying file
414 # replacing the underlying file
415 ce = self._filecache['_branch']
415 ce = self._filecache['_branch']
416 if ce:
416 if ce:
417 ce.refresh()
417 ce.refresh()
418 except: # re-raises
418 except: # re-raises
419 f.discard()
419 f.discard()
420 raise
420 raise
421
421
422 def _opendirstatefile(self):
422 def _opendirstatefile(self):
423 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
423 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
424 if self._pendingmode is not None and self._pendingmode != mode:
424 if self._pendingmode is not None and self._pendingmode != mode:
425 fp.close()
425 fp.close()
426 raise error.Abort(_('working directory state may be '
426 raise error.Abort(_('working directory state may be '
427 'changed parallelly'))
427 'changed parallelly'))
428 self._pendingmode = mode
428 self._pendingmode = mode
429 return fp
429 return fp
430
430
431 def _read(self):
431 def _read(self):
432 self._map = {}
432 self._map = {}
433 self._copymap = {}
433 self._copymap = {}
434 # ignore HG_PENDING because identity is used only for writing
434 # ignore HG_PENDING because identity is used only for writing
435 self._identity = util.filestat.frompath(
435 self._identity = util.filestat.frompath(
436 self._opener.join(self._filename))
436 self._opener.join(self._filename))
437 try:
437 try:
438 fp = self._opendirstatefile()
438 fp = self._opendirstatefile()
439 try:
439 try:
440 st = fp.read()
440 st = fp.read()
441 finally:
441 finally:
442 fp.close()
442 fp.close()
443 except IOError as err:
443 except IOError as err:
444 if err.errno != errno.ENOENT:
444 if err.errno != errno.ENOENT:
445 raise
445 raise
446 return
446 return
447 if not st:
447 if not st:
448 return
448 return
449
449
450 if util.safehasattr(parsers, 'dict_new_presized'):
450 if util.safehasattr(parsers, 'dict_new_presized'):
451 # Make an estimate of the number of files in the dirstate based on
451 # Make an estimate of the number of files in the dirstate based on
452 # its size. From a linear regression on a set of real-world repos,
452 # its size. From a linear regression on a set of real-world repos,
453 # all over 10,000 files, the size of a dirstate entry is 85
453 # all over 10,000 files, the size of a dirstate entry is 85
454 # bytes. The cost of resizing is significantly higher than the cost
454 # bytes. The cost of resizing is significantly higher than the cost
455 # of filling in a larger presized dict, so subtract 20% from the
455 # of filling in a larger presized dict, so subtract 20% from the
456 # size.
456 # size.
457 #
457 #
458 # This heuristic is imperfect in many ways, so in a future dirstate
458 # This heuristic is imperfect in many ways, so in a future dirstate
459 # format update it makes sense to just record the number of entries
459 # format update it makes sense to just record the number of entries
460 # on write.
460 # on write.
461 self._map = parsers.dict_new_presized(len(st) / 71)
461 self._map = parsers.dict_new_presized(len(st) / 71)
462
462
463 # Python's garbage collector triggers a GC each time a certain number
463 # Python's garbage collector triggers a GC each time a certain number
464 # of container objects (the number being defined by
464 # of container objects (the number being defined by
465 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
465 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
466 # for each file in the dirstate. The C version then immediately marks
466 # for each file in the dirstate. The C version then immediately marks
467 # them as not to be tracked by the collector. However, this has no
467 # them as not to be tracked by the collector. However, this has no
468 # effect on when GCs are triggered, only on what objects the GC looks
468 # effect on when GCs are triggered, only on what objects the GC looks
469 # into. This means that O(number of files) GCs are unavoidable.
469 # into. This means that O(number of files) GCs are unavoidable.
470 # Depending on when in the process's lifetime the dirstate is parsed,
470 # Depending on when in the process's lifetime the dirstate is parsed,
471 # this can get very expensive. As a workaround, disable GC while
471 # this can get very expensive. As a workaround, disable GC while
472 # parsing the dirstate.
472 # parsing the dirstate.
473 #
473 #
474 # (we cannot decorate the function directly since it is in a C module)
474 # (we cannot decorate the function directly since it is in a C module)
475 parse_dirstate = util.nogc(parsers.parse_dirstate)
475 parse_dirstate = util.nogc(parsers.parse_dirstate)
476 p = parse_dirstate(self._map, self._copymap, st)
476 p = parse_dirstate(self._map, self._copymap, st)
477 if not self._dirtypl:
477 if not self._dirtypl:
478 self._pl = p
478 self._pl = p
479
479
480 def invalidate(self):
480 def invalidate(self):
481 '''Causes the next access to reread the dirstate.
481 '''Causes the next access to reread the dirstate.
482
482
483 This is different from localrepo.invalidatedirstate() because it always
483 This is different from localrepo.invalidatedirstate() because it always
484 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
484 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
485 check whether the dirstate has changed before rereading it.'''
485 check whether the dirstate has changed before rereading it.'''
486
486
487 for a in ("_map", "_copymap", "_identity",
487 for a in ("_map", "_copymap", "_identity",
488 "_filefoldmap", "_dirfoldmap", "_branch",
488 "_filefoldmap", "_dirfoldmap", "_branch",
489 "_pl", "_dirs", "_ignore", "_nonnormalset",
489 "_pl", "_dirs", "_ignore", "_nonnormalset",
490 "_otherparentset"):
490 "_otherparentset"):
491 if a in self.__dict__:
491 if a in self.__dict__:
492 delattr(self, a)
492 delattr(self, a)
493 self._lastnormaltime = 0
493 self._lastnormaltime = 0
494 self._dirty = False
494 self._dirty = False
495 self._updatedfiles.clear()
495 self._updatedfiles.clear()
496 self._parentwriters = 0
496 self._parentwriters = 0
497 self._origpl = None
497 self._origpl = None
498
498
499 def copy(self, source, dest):
499 def copy(self, source, dest):
500 """Mark dest as a copy of source. Unmark dest if source is None."""
500 """Mark dest as a copy of source. Unmark dest if source is None."""
501 if source == dest:
501 if source == dest:
502 return
502 return
503 self._dirty = True
503 self._dirty = True
504 if source is not None:
504 if source is not None:
505 self._copymap[dest] = source
505 self._copymap[dest] = source
506 self._updatedfiles.add(source)
506 self._updatedfiles.add(source)
507 self._updatedfiles.add(dest)
507 self._updatedfiles.add(dest)
508 elif dest in self._copymap:
508 elif dest in self._copymap:
509 del self._copymap[dest]
509 del self._copymap[dest]
510 self._updatedfiles.add(dest)
510 self._updatedfiles.add(dest)
511
511
512 def copied(self, file):
512 def copied(self, file):
513 return self._copymap.get(file, None)
513 return self._copymap.get(file, None)
514
514
515 def copies(self):
515 def copies(self):
516 return self._copymap
516 return self._copymap
517
517
518 def _droppath(self, f):
518 def _droppath(self, f):
519 if self[f] not in "?r" and "_dirs" in self.__dict__:
519 if self[f] not in "?r" and "_dirs" in self.__dict__:
520 self._dirs.delpath(f)
520 self._dirs.delpath(f)
521
521
522 if "_filefoldmap" in self.__dict__:
522 if "_filefoldmap" in self.__dict__:
523 normed = util.normcase(f)
523 normed = util.normcase(f)
524 if normed in self._filefoldmap:
524 if normed in self._filefoldmap:
525 del self._filefoldmap[normed]
525 del self._filefoldmap[normed]
526
526
527 self._updatedfiles.add(f)
527 self._updatedfiles.add(f)
528
528
529 def _addpath(self, f, state, mode, size, mtime):
529 def _addpath(self, f, state, mode, size, mtime):
530 oldstate = self[f]
530 oldstate = self[f]
531 if state == 'a' or oldstate == 'r':
531 if state == 'a' or oldstate == 'r':
532 scmutil.checkfilename(f)
532 scmutil.checkfilename(f)
533 if f in self._dirs:
533 if f in self._dirs:
534 raise error.Abort(_('directory %r already in dirstate') % f)
534 raise error.Abort(_('directory %r already in dirstate') % f)
535 # shadows
535 # shadows
536 for d in util.finddirs(f):
536 for d in util.finddirs(f):
537 if d in self._dirs:
537 if d in self._dirs:
538 break
538 break
539 if d in self._map and self[d] != 'r':
539 if d in self._map and self[d] != 'r':
540 raise error.Abort(
540 raise error.Abort(
541 _('file %r in dirstate clashes with %r') % (d, f))
541 _('file %r in dirstate clashes with %r') % (d, f))
542 if oldstate in "?r" and "_dirs" in self.__dict__:
542 if oldstate in "?r" and "_dirs" in self.__dict__:
543 self._dirs.addpath(f)
543 self._dirs.addpath(f)
544 self._dirty = True
544 self._dirty = True
545 self._updatedfiles.add(f)
545 self._updatedfiles.add(f)
546 self._map[f] = dirstatetuple(state, mode, size, mtime)
546 self._map[f] = dirstatetuple(state, mode, size, mtime)
547 if state != 'n' or mtime == -1:
547 if state != 'n' or mtime == -1:
548 self._nonnormalset.add(f)
548 self._nonnormalset.add(f)
549 if size == -2:
549 if size == -2:
550 self._otherparentset.add(f)
550 self._otherparentset.add(f)
551
551
552 def normal(self, f):
552 def normal(self, f):
553 '''Mark a file normal and clean.'''
553 '''Mark a file normal and clean.'''
554 s = os.lstat(self._join(f))
554 s = os.lstat(self._join(f))
555 mtime = s.st_mtime
555 mtime = s.st_mtime
556 self._addpath(f, 'n', s.st_mode,
556 self._addpath(f, 'n', s.st_mode,
557 s.st_size & _rangemask, mtime & _rangemask)
557 s.st_size & _rangemask, mtime & _rangemask)
558 if f in self._copymap:
558 if f in self._copymap:
559 del self._copymap[f]
559 del self._copymap[f]
560 if f in self._nonnormalset:
560 if f in self._nonnormalset:
561 self._nonnormalset.remove(f)
561 self._nonnormalset.remove(f)
562 if mtime > self._lastnormaltime:
562 if mtime > self._lastnormaltime:
563 # Remember the most recent modification timeslot for status(),
563 # Remember the most recent modification timeslot for status(),
564 # to make sure we won't miss future size-preserving file content
564 # to make sure we won't miss future size-preserving file content
565 # modifications that happen within the same timeslot.
565 # modifications that happen within the same timeslot.
566 self._lastnormaltime = mtime
566 self._lastnormaltime = mtime
567
567
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid and f in self._map:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map[f]
            if entry[0] == 'r' and entry[2] in (-1, -2):
                source = self._copymap.get(f)
                if entry[2] == -1:
                    self.merge(f)
                elif entry[2] == -2:
                    self.otherparent(f)
                if source:
                    # merge()/otherparent() cleared the copy record; restore it
                    self.copy(source, f)
                return
            if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                # already merged or other-parent: state already correct
                return
        # mode 0 / size -1 / mtime -1 force a content check on next status
        self._addpath(f, 'n', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]
        if f in self._nonnormalset:
            self._nonnormalset.remove(f)
591
591
592 def otherparent(self, f):
592 def otherparent(self, f):
593 '''Mark as coming from the other parent, always dirty.'''
593 '''Mark as coming from the other parent, always dirty.'''
594 if self._pl[1] == nullid:
594 if self._pl[1] == nullid:
595 raise error.Abort(_("setting %r to other parent "
595 raise error.Abort(_("setting %r to other parent "
596 "only allowed in merges") % f)
596 "only allowed in merges") % f)
597 if f in self and self[f] == 'n':
597 if f in self and self[f] == 'n':
598 # merge-like
598 # merge-like
599 self._addpath(f, 'm', 0, -2, -1)
599 self._addpath(f, 'm', 0, -2, -1)
600 else:
600 else:
601 # add-like
601 # add-like
602 self._addpath(f, 'n', 0, -2, -1)
602 self._addpath(f, 'n', 0, -2, -1)
603
603
604 if f in self._copymap:
604 if f in self._copymap:
605 del self._copymap[f]
605 del self._copymap[f]
606
606
607 def add(self, f):
607 def add(self, f):
608 '''Mark a file added.'''
608 '''Mark a file added.'''
609 self._addpath(f, 'a', 0, -1, -1)
609 self._addpath(f, 'a', 0, -1, -1)
610 if f in self._copymap:
610 if f in self._copymap:
611 del self._copymap[f]
611 del self._copymap[f]
612
612
613 def remove(self, f):
613 def remove(self, f):
614 '''Mark a file removed.'''
614 '''Mark a file removed.'''
615 self._dirty = True
615 self._dirty = True
616 self._droppath(f)
616 self._droppath(f)
617 size = 0
617 size = 0
618 if self._pl[1] != nullid and f in self._map:
618 if self._pl[1] != nullid and f in self._map:
619 # backup the previous state
619 # backup the previous state
620 entry = self._map[f]
620 entry = self._map[f]
621 if entry[0] == 'm': # merge
621 if entry[0] == 'm': # merge
622 size = -1
622 size = -1
623 elif entry[0] == 'n' and entry[2] == -2: # other parent
623 elif entry[0] == 'n' and entry[2] == -2: # other parent
624 size = -2
624 size = -2
625 self._otherparentset.add(f)
625 self._otherparentset.add(f)
626 self._map[f] = dirstatetuple('r', 0, size, 0)
626 self._map[f] = dirstatetuple('r', 0, size, 0)
627 self._nonnormalset.add(f)
627 self._nonnormalset.add(f)
628 if size == 0 and f in self._copymap:
628 if size == 0 and f in self._copymap:
629 del self._copymap[f]
629 del self._copymap[f]
630
630
631 def merge(self, f):
631 def merge(self, f):
632 '''Mark a file merged.'''
632 '''Mark a file merged.'''
633 if self._pl[1] == nullid:
633 if self._pl[1] == nullid:
634 return self.normallookup(f)
634 return self.normallookup(f)
635 return self.otherparent(f)
635 return self.otherparent(f)
636
636
637 def drop(self, f):
637 def drop(self, f):
638 '''Drop a file from the dirstate'''
638 '''Drop a file from the dirstate'''
639 if f in self._map:
639 if f in self._map:
640 self._dirty = True
640 self._dirty = True
641 self._droppath(f)
641 self._droppath(f)
642 del self._map[f]
642 del self._map[f]
643 if f in self._nonnormalset:
643 if f in self._nonnormalset:
644 self._nonnormalset.remove(f)
644 self._nonnormalset.remove(f)
645 if f in self._copymap:
645 if f in self._copymap:
646 del self._copymap[f]
646 del self._copymap[f]
647
647
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Discover the canonical case of path on disk and cache it.

        normed is the normcase()d form of path; exists, when not None,
        says whether the path exists on disk (saves an lexists() call).
        Results for existing paths are cached in storemap (one of the
        file/dir fold maps).  Returns the case-folded path.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and '/' in path:
                d, f = path.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + "/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if '/' in normed:
                d, f = normed.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + "/" + d
                folded = d + "/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only paths that exist on disk are cached
            storemap[normed] = folded

        return folded
673
673
674 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
674 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
675 normed = util.normcase(path)
675 normed = util.normcase(path)
676 folded = self._filefoldmap.get(normed, None)
676 folded = self._filefoldmap.get(normed, None)
677 if folded is None:
677 if folded is None:
678 if isknown:
678 if isknown:
679 folded = path
679 folded = path
680 else:
680 else:
681 folded = self._discoverpath(path, normed, ignoremissing, exists,
681 folded = self._discoverpath(path, normed, ignoremissing, exists,
682 self._filefoldmap)
682 self._filefoldmap)
683 return folded
683 return folded
684
684
685 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
685 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
686 normed = util.normcase(path)
686 normed = util.normcase(path)
687 folded = self._filefoldmap.get(normed, None)
687 folded = self._filefoldmap.get(normed, None)
688 if folded is None:
688 if folded is None:
689 folded = self._dirfoldmap.get(normed, None)
689 folded = self._dirfoldmap.get(normed, None)
690 if folded is None:
690 if folded is None:
691 if isknown:
691 if isknown:
692 folded = path
692 folded = path
693 else:
693 else:
694 # store discovered result in dirfoldmap so that future
694 # store discovered result in dirfoldmap so that future
695 # normalizefile calls don't start matching directories
695 # normalizefile calls don't start matching directories
696 folded = self._discoverpath(path, normed, ignoremissing, exists,
696 folded = self._discoverpath(path, normed, ignoremissing, exists,
697 self._dirfoldmap)
697 self._dirfoldmap)
698 return folded
698 return folded
699
699
700 def normalize(self, path, isknown=False, ignoremissing=False):
700 def normalize(self, path, isknown=False, ignoremissing=False):
701 '''
701 '''
702 normalize the case of a pathname when on a casefolding filesystem
702 normalize the case of a pathname when on a casefolding filesystem
703
703
704 isknown specifies whether the filename came from walking the
704 isknown specifies whether the filename came from walking the
705 disk, to avoid extra filesystem access.
705 disk, to avoid extra filesystem access.
706
706
707 If ignoremissing is True, missing path are returned
707 If ignoremissing is True, missing path are returned
708 unchanged. Otherwise, we try harder to normalize possibly
708 unchanged. Otherwise, we try harder to normalize possibly
709 existing path components.
709 existing path components.
710
710
711 The normalized case is determined based on the following precedence:
711 The normalized case is determined based on the following precedence:
712
712
713 - version of name already stored in the dirstate
713 - version of name already stored in the dirstate
714 - version of name stored on disk
714 - version of name stored on disk
715 - version provided via command arguments
715 - version provided via command arguments
716 '''
716 '''
717
717
718 if self._checkcase:
718 if self._checkcase:
719 return self._normalize(path, isknown, ignoremissing)
719 return self._normalize(path, isknown, ignoremissing)
720 return path
720 return path
721
721
722 def clear(self):
722 def clear(self):
723 self._map = {}
723 self._map = {}
724 self._nonnormalset = set()
724 self._nonnormalset = set()
725 self._otherparentset = set()
725 self._otherparentset = set()
726 if "_dirs" in self.__dict__:
726 if "_dirs" in self.__dict__:
727 delattr(self, "_dirs")
727 delattr(self, "_dirs")
728 self._copymap = {}
728 self._copymap = {}
729 self._pl = [nullid, nullid]
729 self._pl = [nullid, nullid]
730 self._lastnormaltime = 0
730 self._lastnormaltime = 0
731 self._updatedfiles.clear()
731 self._updatedfiles.clear()
732 self._dirty = True
732 self._dirty = True
733
733
734 def rebuild(self, parent, allfiles, changedfiles=None):
734 def rebuild(self, parent, allfiles, changedfiles=None):
735 if changedfiles is None:
735 if changedfiles is None:
736 # Rebuild entire dirstate
736 # Rebuild entire dirstate
737 changedfiles = allfiles
737 changedfiles = allfiles
738 lastnormaltime = self._lastnormaltime
738 lastnormaltime = self._lastnormaltime
739 self.clear()
739 self.clear()
740 self._lastnormaltime = lastnormaltime
740 self._lastnormaltime = lastnormaltime
741
741
742 if self._origpl is None:
742 if self._origpl is None:
743 self._origpl = self._pl
743 self._origpl = self._pl
744 self._pl = (parent, nullid)
744 self._pl = (parent, nullid)
745 for f in changedfiles:
745 for f in changedfiles:
746 if f in allfiles:
746 if f in allfiles:
747 self.normallookup(f)
747 self.normallookup(f)
748 else:
748 else:
749 self.drop(f)
749 self.drop(f)
750
750
751 self._dirty = True
751 self._dirty = True
752
752
753 def identity(self):
753 def identity(self):
754 '''Return identity of dirstate itself to detect changing in storage
754 '''Return identity of dirstate itself to detect changing in storage
755
755
756 If identity of previous dirstate is equal to this, writing
756 If identity of previous dirstate is equal to this, writing
757 changes based on the former dirstate out can keep consistency.
757 changes based on the former dirstate out can keep consistency.
758 '''
758 '''
759 return self._identity
759 return self._identity
760
760
    def write(self, tr):
        """Write the dirstate out if dirty.

        When a transaction tr is given, entries whose mtime equals the
        filesystem's "now" are first demoted to need-lookup, then the
        actual write is delayed to transaction close via a file generator.
        Without a transaction, the file is written immediately with
        atomictemp/checkambig guards.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            dmap = self._map
            for f in self._updatedfiles:
                e = dmap.get(f)
                if e is not None and e[0] == 'n' and e[3] == now:
                    # mtime is ambiguous: force a lookup on next status
                    dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
                    self._nonnormalset.add(f)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')
            return

        # no transaction: write immediately
        st = self._opener(filename, "w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
793
793
794 def addparentchangecallback(self, category, callback):
794 def addparentchangecallback(self, category, callback):
795 """add a callback to be called when the wd parents are changed
795 """add a callback to be called when the wd parents are changed
796
796
797 Callback will be called with the following arguments:
797 Callback will be called with the following arguments:
798 dirstate, (oldp1, oldp2), (newp1, newp2)
798 dirstate, (oldp1, oldp2), (newp1, newp2)
799
799
800 Category is a unique identifier to allow overwriting an old callback
800 Category is a unique identifier to allow overwriting an old callback
801 with a newer callback.
801 with a newer callback.
802 """
802 """
803 self._plchangecallbacks[category] = callback
803 self._plchangecallbacks[category] = callback
804
804
    def _writedirstate(self, st):
        """Serialize the dirstate to the open file object st.

        Also fires parent-change callbacks, honours the
        debug.dirstate.delaywrite test knob, and refreshes the
        non-normal/other-parent caches after packing.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(self._plchangecallbacks.iteritems()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st).st_mtime & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in self._map.iteritems():
                if e[0] == 'n' and e[3] == now:
                    import time # to avoid useless import
                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end # trust our estimate that the end is near now
                    break

        st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
        # packing may have dropped timestamps; rebuild the caches from the map
        self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
        st.close()
        self._lastnormaltime = 0
        self._dirty = self._dirtypl = False
837
837
838 def _dirignore(self, f):
838 def _dirignore(self, f):
839 if f == '.':
839 if f == '.':
840 return False
840 return False
841 if self._ignore(f):
841 if self._ignore(f):
842 return True
842 return True
843 for p in util.finddirs(f):
843 for p in util.finddirs(f):
844 if self._ignore(p):
844 if self._ignore(p):
845 return True
845 return True
846 return False
846 return False
847
847
848 def _ignorefiles(self):
848 def _ignorefiles(self):
849 files = []
849 files = []
850 if os.path.exists(self._join('.hgignore')):
850 if os.path.exists(self._join('.hgignore')):
851 files.append(self._join('.hgignore'))
851 files.append(self._join('.hgignore'))
852 for name, path in self._ui.configitems("ui"):
852 for name, path in self._ui.configitems("ui"):
853 if name == 'ignore' or name.startswith('ignore.'):
853 if name == 'ignore' or name.startswith('ignore.'):
854 # we need to use os.path.join here rather than self._join
854 # we need to use os.path.join here rather than self._join
855 # because path is arbitrary and user-specified
855 # because path is arbitrary and user-specified
856 files.append(os.path.join(self._rootdir, util.expandpath(path)))
856 files.append(os.path.join(self._rootdir, util.expandpath(path)))
857 return files
857 return files
858
858
    def _ignorefileandline(self, f):
        """Find which ignore file and line match f.

        Walks all ignore files breadth-first, following subincludes, and
        returns (filepath, linenumber, linecontent) for the first pattern
        matching f, or (None, -1, "") when nothing matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(i, self._ui.warn,
                                                sourceinfo=True)
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, 'glob')
                if kind == "subinclude":
                    # queue the referenced ignore file, but only once
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(self._root, '', [], [pattern],
                                   warn=self._ui.warn)
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, "")
878
878
    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            # translate an unsupported stat mode into a user-facing message
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        # bind frequently-used names to locals for the loops below
        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo; both lists are
        # sorted, so this is a single merge-style pass
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or '.' in files:
            files = ['.']
        # subrepos and .hg are listed but never stat'ed
        results = dict.fromkeys(subrepos)
        results['.hg'] = None

        alldirs = None
        for ff in files:
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['.']
            if normalize and ff != '.':
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if alldirs is None:
                        alldirs = util.dirs(dmap)
                    if nf in alldirs:
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, inst.strerror)

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group stat'ed results by their case-folded name
            for f, st in results.iteritems():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            # any group with more than one spelling keeps only the spelling
            # that matches the filesystem's case
            for norm, paths in normed.iteritems():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(path, norm, True, None,
                                                    self._dirfoldmap)
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1008
1008
1009 def walk(self, match, subrepos, unknown, ignored, full=True):
1009 def walk(self, match, subrepos, unknown, ignored, full=True):
1010 '''
1010 '''
1011 Walk recursively through the directory tree, finding all files
1011 Walk recursively through the directory tree, finding all files
1012 matched by match.
1012 matched by match.
1013
1013
1014 If full is False, maybe skip some known-clean files.
1014 If full is False, maybe skip some known-clean files.
1015
1015
1016 Return a dict mapping filename to stat-like object (either
1016 Return a dict mapping filename to stat-like object (either
1017 mercurial.osutil.stat instance or return value of os.stat()).
1017 mercurial.osutil.stat instance or return value of os.stat()).
1018
1018
1019 '''
1019 '''
1020 # full is a flag that extensions that hook into walk can use -- this
1020 # full is a flag that extensions that hook into walk can use -- this
1021 # implementation doesn't use it at all. This satisfies the contract
1021 # implementation doesn't use it at all. This satisfies the contract
1022 # because we only guarantee a "maybe".
1022 # because we only guarantee a "maybe".
1023
1023
1024 if ignored:
1024 if ignored:
1025 ignore = util.never
1025 ignore = util.never
1026 dirignore = util.never
1026 dirignore = util.never
1027 elif unknown:
1027 elif unknown:
1028 ignore = self._ignore
1028 ignore = self._ignore
1029 dirignore = self._dirignore
1029 dirignore = self._dirignore
1030 else:
1030 else:
1031 # if not unknown and not ignored, drop dir recursion and step 2
1031 # if not unknown and not ignored, drop dir recursion and step 2
1032 ignore = util.always
1032 ignore = util.always
1033 dirignore = util.always
1033 dirignore = util.always
1034
1034
1035 matchfn = match.matchfn
1035 matchfn = match.matchfn
1036 matchalways = match.always()
1036 matchalways = match.always()
1037 matchtdir = match.traversedir
1037 matchtdir = match.traversedir
1038 dmap = self._map
1038 dmap = self._map
1039 listdir = util.listdir
1039 listdir = util.listdir
1040 lstat = os.lstat
1040 lstat = os.lstat
1041 dirkind = stat.S_IFDIR
1041 dirkind = stat.S_IFDIR
1042 regkind = stat.S_IFREG
1042 regkind = stat.S_IFREG
1043 lnkkind = stat.S_IFLNK
1043 lnkkind = stat.S_IFLNK
1044 join = self._join
1044 join = self._join
1045
1045
1046 exact = skipstep3 = False
1046 exact = skipstep3 = False
1047 if match.isexact(): # match.exact
1047 if match.isexact(): # match.exact
1048 exact = True
1048 exact = True
1049 dirignore = util.always # skip step 2
1049 dirignore = util.always # skip step 2
1050 elif match.prefix(): # match.match, no patterns
1050 elif match.prefix(): # match.match, no patterns
1051 skipstep3 = True
1051 skipstep3 = True
1052
1052
1053 if not exact and self._checkcase:
1053 if not exact and self._checkcase:
1054 normalize = self._normalize
1054 normalize = self._normalize
1055 normalizefile = self._normalizefile
1055 normalizefile = self._normalizefile
1056 skipstep3 = False
1056 skipstep3 = False
1057 else:
1057 else:
1058 normalize = self._normalize
1058 normalize = self._normalize
1059 normalizefile = None
1059 normalizefile = None
1060
1060
1061 # step 1: find all explicit files
1061 # step 1: find all explicit files
1062 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1062 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1063
1063
1064 skipstep3 = skipstep3 and not (work or dirsnotfound)
1064 skipstep3 = skipstep3 and not (work or dirsnotfound)
1065 work = [d for d in work if not dirignore(d[0])]
1065 work = [d for d in work if not dirignore(d[0])]
1066
1066
1067 # step 2: visit subdirectories
1067 # step 2: visit subdirectories
1068 def traverse(work, alreadynormed):
1068 def traverse(work, alreadynormed):
1069 wadd = work.append
1069 wadd = work.append
1070 while work:
1070 while work:
1071 nd = work.pop()
1071 nd = work.pop()
1072 if not match.visitdir(nd):
1072 if not match.visitdir(nd):
1073 continue
1073 continue
1074 skip = None
1074 skip = None
1075 if nd == '.':
1075 if nd == '.':
1076 nd = ''
1076 nd = ''
1077 else:
1077 else:
1078 skip = '.hg'
1078 skip = '.hg'
1079 try:
1079 try:
1080 entries = listdir(join(nd), stat=True, skip=skip)
1080 entries = listdir(join(nd), stat=True, skip=skip)
1081 except OSError as inst:
1081 except OSError as inst:
1082 if inst.errno in (errno.EACCES, errno.ENOENT):
1082 if inst.errno in (errno.EACCES, errno.ENOENT):
1083 match.bad(self.pathto(nd), inst.strerror)
1083 match.bad(self.pathto(nd), inst.strerror)
1084 continue
1084 continue
1085 raise
1085 raise
1086 for f, kind, st in entries:
1086 for f, kind, st in entries:
1087 if normalizefile:
1087 if normalizefile:
1088 # even though f might be a directory, we're only
1088 # even though f might be a directory, we're only
1089 # interested in comparing it to files currently in the
1089 # interested in comparing it to files currently in the
1090 # dmap -- therefore normalizefile is enough
1090 # dmap -- therefore normalizefile is enough
1091 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1091 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1092 True)
1092 True)
1093 else:
1093 else:
1094 nf = nd and (nd + "/" + f) or f
1094 nf = nd and (nd + "/" + f) or f
1095 if nf not in results:
1095 if nf not in results:
1096 if kind == dirkind:
1096 if kind == dirkind:
1097 if not ignore(nf):
1097 if not ignore(nf):
1098 if matchtdir:
1098 if matchtdir:
1099 matchtdir(nf)
1099 matchtdir(nf)
1100 wadd(nf)
1100 wadd(nf)
1101 if nf in dmap and (matchalways or matchfn(nf)):
1101 if nf in dmap and (matchalways or matchfn(nf)):
1102 results[nf] = None
1102 results[nf] = None
1103 elif kind == regkind or kind == lnkkind:
1103 elif kind == regkind or kind == lnkkind:
1104 if nf in dmap:
1104 if nf in dmap:
1105 if matchalways or matchfn(nf):
1105 if matchalways or matchfn(nf):
1106 results[nf] = st
1106 results[nf] = st
1107 elif ((matchalways or matchfn(nf))
1107 elif ((matchalways or matchfn(nf))
1108 and not ignore(nf)):
1108 and not ignore(nf)):
1109 # unknown file -- normalize if necessary
1109 # unknown file -- normalize if necessary
1110 if not alreadynormed:
1110 if not alreadynormed:
1111 nf = normalize(nf, False, True)
1111 nf = normalize(nf, False, True)
1112 results[nf] = st
1112 results[nf] = st
1113 elif nf in dmap and (matchalways or matchfn(nf)):
1113 elif nf in dmap and (matchalways or matchfn(nf)):
1114 results[nf] = None
1114 results[nf] = None
1115
1115
1116 for nd, d in work:
1116 for nd, d in work:
1117 # alreadynormed means that processwork doesn't have to do any
1117 # alreadynormed means that processwork doesn't have to do any
1118 # expensive directory normalization
1118 # expensive directory normalization
1119 alreadynormed = not normalize or nd == d
1119 alreadynormed = not normalize or nd == d
1120 traverse([d], alreadynormed)
1120 traverse([d], alreadynormed)
1121
1121
1122 for s in subrepos:
1122 for s in subrepos:
1123 del results[s]
1123 del results[s]
1124 del results['.hg']
1124 del results['.hg']
1125
1125
1126 # step 3: visit remaining files from dmap
1126 # step 3: visit remaining files from dmap
1127 if not skipstep3 and not exact:
1127 if not skipstep3 and not exact:
1128 # If a dmap file is not in results yet, it was either
1128 # If a dmap file is not in results yet, it was either
1129 # a) not matching matchfn b) ignored, c) missing, or d) under a
1129 # a) not matching matchfn b) ignored, c) missing, or d) under a
1130 # symlink directory.
1130 # symlink directory.
1131 if not results and matchalways:
1131 if not results and matchalways:
1132 visit = [f for f in dmap]
1132 visit = [f for f in dmap]
1133 else:
1133 else:
1134 visit = [f for f in dmap if f not in results and matchfn(f)]
1134 visit = [f for f in dmap if f not in results and matchfn(f)]
1135 visit.sort()
1135 visit.sort()
1136
1136
1137 if unknown:
1137 if unknown:
1138 # unknown == True means we walked all dirs under the roots
1138 # unknown == True means we walked all dirs under the roots
1139 # that wasn't ignored, and everything that matched was stat'ed
1139 # that wasn't ignored, and everything that matched was stat'ed
1140 # and is already in results.
1140 # and is already in results.
1141 # The rest must thus be ignored or under a symlink.
1141 # The rest must thus be ignored or under a symlink.
1142 audit_path = pathutil.pathauditor(self._root)
1142 audit_path = pathutil.pathauditor(self._root)
1143
1143
1144 for nf in iter(visit):
1144 for nf in iter(visit):
1145 # If a stat for the same file was already added with a
1145 # If a stat for the same file was already added with a
1146 # different case, don't add one for this, since that would
1146 # different case, don't add one for this, since that would
1147 # make it appear as if the file exists under both names
1147 # make it appear as if the file exists under both names
1148 # on disk.
1148 # on disk.
1149 if (normalizefile and
1149 if (normalizefile and
1150 normalizefile(nf, True, True) in results):
1150 normalizefile(nf, True, True) in results):
1151 results[nf] = None
1151 results[nf] = None
1152 # Report ignored items in the dmap as long as they are not
1152 # Report ignored items in the dmap as long as they are not
1153 # under a symlink directory.
1153 # under a symlink directory.
1154 elif audit_path.check(nf):
1154 elif audit_path.check(nf):
1155 try:
1155 try:
1156 results[nf] = lstat(join(nf))
1156 results[nf] = lstat(join(nf))
1157 # file was just ignored, no links, and exists
1157 # file was just ignored, no links, and exists
1158 except OSError:
1158 except OSError:
1159 # file doesn't exist
1159 # file doesn't exist
1160 results[nf] = None
1160 results[nf] = None
1161 else:
1161 else:
1162 # It's either missing or under a symlink directory
1162 # It's either missing or under a symlink directory
1163 # which we in this case report as missing
1163 # which we in this case report as missing
1164 results[nf] = None
1164 results[nf] = None
1165 else:
1165 else:
1166 # We may not have walked the full directory tree above,
1166 # We may not have walked the full directory tree above,
1167 # so stat and check everything we missed.
1167 # so stat and check everything we missed.
1168 iv = iter(visit)
1168 iv = iter(visit)
1169 for st in util.statfiles([join(i) for i in visit]):
1169 for st in util.statfiles([join(i) for i in visit]):
1170 results[next(iv)] = st
1170 results[next(iv)] = st
1171 return results
1171 return results
1172
1172
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        # the ignored/clean/unknown parameters are booleans selecting which
        # categories to report; rebind them before reusing the names below
        # as the result lists
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        # the per-file loop below is hot: bind bound methods and attributes
        # to locals so each iteration uses cheap local lookups
        ladd = lookup.append            # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                # file exists on disk but is not tracked: ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            # tracked (state n/m/a) but no stat result: gone from disk
            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                # 'n'ormal entry: compare recorded size/mode/mtime with the
                # on-disk stat (the & _rangemask forms handle values that
                # were truncated to 31 bits when the dirstate was written)
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                # 'm'erge entries are reported as modified
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))
1264
1264
1265 def matches(self, match):
1265 def matches(self, match):
1266 '''
1266 '''
1267 return files in the dirstate (in whatever state) filtered by match
1267 return files in the dirstate (in whatever state) filtered by match
1268 '''
1268 '''
1269 dmap = self._map
1269 dmap = self._map
1270 if match.always():
1270 if match.always():
1271 return dmap.keys()
1271 return dmap.keys()
1272 files = match.files()
1272 files = match.files()
1273 if match.isexact():
1273 if match.isexact():
1274 # fast path -- filter the other way around, since typically files is
1274 # fast path -- filter the other way around, since typically files is
1275 # much smaller than dmap
1275 # much smaller than dmap
1276 return [f for f in files if f in dmap]
1276 return [f for f in files if f in dmap]
1277 if match.prefix() and all(fn in dmap for fn in files):
1277 if match.prefix() and all(fn in dmap for fn in files):
1278 # fast path -- all the values are known to be files, so just return
1278 # fast path -- all the values are known to be files, so just return
1279 # that
1279 # that
1280 return list(files)
1280 return list(files)
1281 return [f for f in dmap if match(f)]
1281 return [f for f in dmap if match(f)]
1282
1282
1283 def _actualfilename(self, tr):
1283 def _actualfilename(self, tr):
1284 if tr:
1284 if tr:
1285 return self._pendingfilename
1285 return self._pendingfilename
1286 else:
1286 else:
1287 return self._filename
1287 return self._filename
1288
1288
    def savebackup(self, tr, suffix='', prefix=''):
        '''Save current dirstate into backup file with suffix'''
        # at least one of prefix/suffix must be non-empty so the backup name
        # differs from the live file name (also asserted explicitly below)
        assert len(suffix) > 0 or len(prefix) > 0
        filename = self._actualfilename(tr)

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(self._opener(filename, "w", atomictemp=True,
                                             checkambig=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        # self._filename (not the possibly-pending 'filename') is used so the
        # backup name never contains "pending"
        backupname = prefix + self._filename + suffix
        assert backupname != filename
        # drop any stale backup before creating the new one
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(self._opener.join(filename),
                      self._opener.join(backupname), hardlink=True)
1320
1320
1321 def restorebackup(self, tr, suffix='', prefix=''):
1321 def restorebackup(self, tr, suffix='', prefix=''):
1322 '''Restore dirstate by backup file with suffix'''
1322 '''Restore dirstate by backup file with suffix'''
1323 assert len(suffix) > 0 or len(prefix) > 0
1323 assert len(suffix) > 0 or len(prefix) > 0
1324 # this "invalidate()" prevents "wlock.release()" from writing
1324 # this "invalidate()" prevents "wlock.release()" from writing
1325 # changes of dirstate out after restoring from backup file
1325 # changes of dirstate out after restoring from backup file
1326 self.invalidate()
1326 self.invalidate()
1327 filename = self._actualfilename(tr)
1327 filename = self._actualfilename(tr)
1328 # using self._filename to avoid having "pending" in the backup filename
1328 # using self._filename to avoid having "pending" in the backup filename
1329 self._opener.rename(prefix + self._filename + suffix, filename,
1329 self._opener.rename(prefix + self._filename + suffix, filename,
1330 checkambig=True)
1330 checkambig=True)
1331
1331
1332 def clearbackup(self, tr, suffix='', prefix=''):
1332 def clearbackup(self, tr, suffix='', prefix=''):
1333 '''Clear backup file with suffix'''
1333 '''Clear backup file with suffix'''
1334 assert len(suffix) > 0 or len(prefix) > 0
1334 assert len(suffix) > 0 or len(prefix) > 0
1335 # using self._filename to avoid having "pending" in the backup filename
1335 # using self._filename to avoid having "pending" in the backup filename
1336 self._opener.unlink(prefix + self._filename + suffix)
1336 self._opener.unlink(prefix + self._filename + suffix)
General Comments 0
You need to be logged in to leave comments. Login now