dirstate: avoid unnecessary load+dump during backup...
Jun Wu
r31208:fc57a8b9 default
@@ -1,1262 +1,1263 @@
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import errno
import os
import stat

from .i18n import _
from .node import nullid
from . import (
    encoding,
    error,
    match as matchmod,
    osutil,
    parsers,
    pathutil,
    pycompat,
    scmutil,
    txnutil,
    util,
)

propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = 0x7fffffff

dirstatetuple = parsers.dirstatetuple

class repocache(filecache):
    """filecache for files in .hg/"""
    def join(self, obj, fname):
        return obj._opener.join(fname)

class rootcache(filecache):
    """filecache for files in the repository root"""
    def join(self, obj, fname):
        return obj._join(fname)

def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    tmpfd, tmpname = vfs.mkstemp()
    try:
        return os.fstat(tmpfd).st_mtime
    finally:
        os.close(tmpfd)
        vfs.unlink(tmpname)

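# Illustrative note (not part of the original file): _getfsnow() samples "now"
# from the filesystem itself, via the mtime of a freshly created temporary
# file, rather than from time.time(); the two can disagree (for example on
# network mounts), and _writedirstate() further down relies on the same trick.
# A hedged, standalone sketch of the idea using only the standard library:
#
#   import os, tempfile
#   fd, name = tempfile.mkstemp()
#   try:
#       fs_now = os.fstat(fd).st_mtime
#   finally:
#       os.close(fd)
#       os.unlink(name)
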
def nonnormalentries(dmap):
    '''Compute the nonnormal dirstate entries from the dmap'''
    try:
        return parsers.nonnormalentries(dmap)
    except AttributeError:
        return set(fname for fname, e in dmap.iteritems()
                   if e[0] != 'n' or e[3] == -1)

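# Illustrative note (not part of the original file): each dmap value is a
# dirstatetuple of (state, mode, size, mtime), as the _map docstring below
# spells out, so "nonnormal" means "not in state 'n', or mtime unknown (-1)".
# A hedged sketch with plain tuples standing in for dirstatetuple:
#
#   dmap = {'clean.py': ('n', 0o644, 12, 1000),
#           'added.py': ('a', 0, -1, -1),
#           'lookup.py': ('n', 0o644, 12, -1)}
#   nonnormal = set(f for f, e in dmap.items()
#                   if e[0] != 'n' or e[3] == -1)
#   # nonnormal == {'added.py', 'lookup.py'}
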
class dirstate(object):

    def __init__(self, opener, ui, root, validate):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # internal config: ui.forcecwd
        forcecwd = ui.config('ui', 'forcecwd')
        if forcecwd:
            self._cwd = forcecwd
        self._dirty = False
        self._dirtypl = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        self._parentwriters = 0
        self._filename = 'dirstate'
        self._pendingfilename = '%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None

    def beginparentchange(self):
        '''Marks the beginning of a set of changes that involve changing
        the dirstate parents. If there is an exception during this time,
        the dirstate will not be written when the wlock is released. This
        prevents writing an incoherent dirstate where the parent doesn't
        match the contents.
        '''
        self._parentwriters += 1

    def endparentchange(self):
        '''Marks the end of a set of changes that involve changing the
        dirstate parents. Once all parent changes have been marked done,
        the wlock will be free to write the dirstate on release.
        '''
        if self._parentwriters > 0:
            self._parentwriters -= 1

    def pendingparentchange(self):
        '''Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        '''
        return self._parentwriters > 0

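    # Illustrative note (not part of the original file): setparents() below
    # refuses to run unless it is bracketed by beginparentchange() and
    # endparentchange().  A hedged sketch of how a caller is expected to use
    # the API ('repo' and 'newnode' are assumptions for the example):
    #
    #   repo.dirstate.beginparentchange()
    #   try:
    #       repo.dirstate.setparents(newnode)
    #   finally:
    #       repo.dirstate.endparentchange()
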
    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).'''
        self._read()
        return self._map

    @propertycache
    def _copymap(self):
        self._read()
        return self._copymap

    @propertycache
    def _nonnormalset(self):
        return nonnormalentries(self._map)

    @propertycache
    def _filefoldmap(self):
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            return makefilefoldmap(self._map, util.normcasespec,
                                   util.normcasefallback)

        f = {}
        normcase = util.normcase
        for name, s in self._map.iteritems():
            if s[0] != 'r':
                f[normcase(name)] = name
        f['.'] = '.' # prevents useless util.fspath() invocation
        return f

    @propertycache
    def _dirfoldmap(self):
        f = {}
        normcase = util.normcase
        for name in self._dirs:
            f[normcase(name)] = name
        return f

    @repocache('branch')
    def _branch(self):
        try:
            return self._opener.read("branch").strip() or "default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return "default"

    @propertycache
    def _pl(self):
        try:
            fp = self._opendirstatefile()
            st = fp.read(40)
            fp.close()
            l = len(st)
            if l == 40:
                return st[:20], st[20:40]
            elif l > 0 and l < 40:
                raise error.Abort(_('working directory state appears damaged!'))
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return [nullid, nullid]

    @propertycache
    def _dirs(self):
        return util.dirs(self._map, 'r')

    def dirs(self):
        return self._dirs

    @rootcache('.hgignore')
    def _ignore(self):
        files = self._ignorefiles()
        if not files:
            return util.never

        pats = ['include:%s' % f for f in files]
        return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'

    @propertycache
    def _checklink(self):
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        return util.checkexec(self._root)

    @propertycache
    def _checkcase(self):
        return not util.fscasesensitive(self._join('.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(self, buildfallback):
        if self._checklink and self._checkexec:
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return 'l'
                    if util.statisexec(st):
                        return 'x'
                except OSError:
                    pass
                return ''
            return f

        fallback = buildfallback()
        if self._checklink:
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.isexec(self._join(x)):
                    return 'x'
                return ''
            return f
        else:
            return fallback

    @propertycache
    def _cwd(self):
        return pycompat.getcwd()

    def getcwd(self):
        '''Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        '''
        cwd = self._cwd
        if cwd == self._root:
            return ''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep):]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path

    def __getitem__(self, key):
        '''Return the current state of key (a filename) in the dirstate.

        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked
        '''
        return self._map.get(key, ("?",))[0]

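    # Illustrative note (not part of the original file): __getitem__ makes the
    # dirstate behave like a mapping from filename to single-character state.
    # A hedged usage sketch ('repo' is an assumption for the example):
    #
    #   state = repo.dirstate['mercurial/dirstate.py']
    #   # 'n' normal, 'a' added, 'r' removed, 'm' needs merging,
    #   # '?' untracked, per the docstring above
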
    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        for x in sorted(self._map):
            yield x

    def iteritems(self):
        return self._map.iteritems()

    def parents(self):
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    def branch(self):
        return encoding.tolocal(self._branch)

    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if self._parentwriters == 0:
            raise ValueError("cannot set dirstate parent without "
                             "calling dirstate.beginparentchange")

        self._dirty = self._dirtypl = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._pl = p1, p2
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            for f, s in self._map.iteritems():
                # Discard 'm' markers when moving away from a merge state
                if s[0] == 'm':
                    if f in self._copymap:
                        copies[f] = self._copymap[f]
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == 'n' and s[2] == -2:
                    if f in self._copymap:
                        copies[f] = self._copymap[f]
                    self.add(f)
        return copies

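    # Illustrative note (not part of the original file): the value returned by
    # setparents() is the set of copy records dropped while collapsing a
    # merge, mapping destination to source.  Hedged sketch, hypothetical names:
    #
    #   copies = self.setparents(p1)   # inside a parent-change block
    #   # copies == {'renamed-dest.py': 'rename-source.py'}
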
    def setbranch(self, branch):
        self._branch = encoding.fromlocal(branch)
        f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + '\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache['_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            f.discard()
            raise

    def _opendirstatefile(self):
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            fp.close()
            raise error.Abort(_('working directory state may be '
                                'changed parallelly'))
        self._pendingmode = mode
        return fp

    def _read(self):
        self._map = {}
        self._copymap = {}
        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        if util.safehasattr(parsers, 'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. From a linear regression on a set of real-world repos,
            # all over 10,000 files, the size of a dirstate entry is 85
            # bytes. The cost of resizing is significantly higher than the cost
            # of filling in a larger presized dict, so subtract 20% from the
            # size.
            #
            # This heuristic is imperfect in many ways, so in a future dirstate
            # format update it makes sense to just record the number of entries
            # on write.
            self._map = parsers.dict_new_presized(len(st) / 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self._copymap, st)
        if not self._dirtypl:
            self._pl = p

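    # Illustrative note (not part of the original file): util.nogc above wraps
    # the C parser so the whole parse runs with the cyclic garbage collector
    # switched off.  A hedged sketch of the same idea using only the standard
    # library, for a hypothetical parse() function:
    #
    #   import gc
    #   gc.disable()
    #   try:
    #       result = parse(data)
    #   finally:
    #       gc.enable()
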
    def invalidate(self):
        for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
                  "_pl", "_dirs", "_ignore", "_nonnormalset"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None

    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif dest in self._copymap:
            del self._copymap[dest]
            self._updatedfiles.add(dest)

    def copied(self, file):
        return self._copymap.get(file, None)

    def copies(self):
        return self._copymap

    def _droppath(self, f):
        if self[f] not in "?r" and "_dirs" in self.__dict__:
            self._dirs.delpath(f)

        if "_filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            if normed in self._filefoldmap:
                del self._filefoldmap[normed]

        self._updatedfiles.add(f)

    def _addpath(self, f, state, mode, size, mtime):
        oldstate = self[f]
        if state == 'a' or oldstate == 'r':
            scmutil.checkfilename(f)
            if f in self._dirs:
                raise error.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in util.finddirs(f):
                if d in self._dirs:
                    break
                if d in self._map and self[d] != 'r':
                    raise error.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            self._dirs.addpath(f)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        if state != 'n' or mtime == -1:
            self._nonnormalset.add(f)

    def normal(self, f):
        '''Mark a file normal and clean.'''
        s = os.lstat(self._join(f))
        mtime = s.st_mtime
        self._addpath(f, 'n', s.st_mode,
                      s.st_size & _rangemask, mtime & _rangemask)
        if f in self._copymap:
            del self._copymap[f]
        if f in self._nonnormalset:
            self._nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime

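    # Illustrative note (not part of the original file): _rangemask
    # (0x7fffffff) clips st_size and st_mtime to 31 bits so that very large
    # values still fit the fixed-width integer fields of the dirstate file
    # format.  A hedged example of the masking:
    #
    #   _rangemask = 0x7fffffff
    #   (2 ** 31 + 5) & _rangemask   # -> 5
    #   1700000000 & _rangemask      # unchanged, already < 2**31
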
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid and f in self._map:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map[f]
            if entry[0] == 'r' and entry[2] in (-1, -2):
                source = self._copymap.get(f)
                if entry[2] == -1:
                    self.merge(f)
                elif entry[2] == -2:
                    self.otherparent(f)
                if source:
                    self.copy(source, f)
                return
            if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                return
        self._addpath(f, 'n', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]
        if f in self._nonnormalset:
            self._nonnormalset.remove(f)

    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if self._pl[1] == nullid:
            raise error.Abort(_("setting %r to other parent "
                                "only allowed in merges") % f)
        if f in self and self[f] == 'n':
            # merge-like
            self._addpath(f, 'm', 0, -2, -1)
        else:
            # add-like
            self._addpath(f, 'n', 0, -2, -1)

        if f in self._copymap:
            del self._copymap[f]

    def add(self, f):
        '''Mark a file added.'''
        self._addpath(f, 'a', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]

    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        self._droppath(f)
        size = 0
        if self._pl[1] != nullid and f in self._map:
            # backup the previous state
            entry = self._map[f]
            if entry[0] == 'm': # merge
                size = -1
            elif entry[0] == 'n' and entry[2] == -2: # other parent
                size = -2
        self._map[f] = dirstatetuple('r', 0, size, 0)
        self._nonnormalset.add(f)
        if size == 0 and f in self._copymap:
            del self._copymap[f]

    def merge(self, f):
        '''Mark a file merged.'''
        if self._pl[1] == nullid:
            return self.normallookup(f)
        return self.otherparent(f)

    def drop(self, f):
        '''Drop a file from the dirstate'''
        if f in self._map:
            self._dirty = True
            self._droppath(f)
            del self._map[f]
            if f in self._nonnormalset:
                self._nonnormalset.remove(f)
        if f in self._copymap:
            del self._copymap[f]

    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and '/' in path:
                d, f = path.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + "/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if '/' in normed:
                d, f = normed.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + "/" + d
                folded = d + "/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded

    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                folded = self._discoverpath(path, normed, ignoremissing, exists,
                                            self._filefoldmap)
        return folded

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._filefoldmap.get(normed, None)
        if folded is None:
            folded = self._dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(path, normed, ignoremissing, exists,
                                            self._dirfoldmap)
        return folded

    def normalize(self, path, isknown=False, ignoremissing=False):
        '''
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing paths are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        '''

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path

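    # Illustrative note (not part of the original file): on a case-folding
    # filesystem, normalize() maps user-supplied spellings back to the case
    # the dirstate already knows.  Hedged sketch, with an assumed tracked name:
    #
    #   # dirstate tracks 'README.txt'; the user typed 'readme.TXT'
    #   self.normalize('readme.TXT')   # -> 'README.txt' when _checkcase is True
    #   self.normalize('readme.TXT')   # -> 'readme.TXT' on a case-sensitive FS
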
    def clear(self):
        self._map = {}
        self._nonnormalset = set()
        if "_dirs" in self.__dict__:
            delattr(self, "_dirs")
        self._copymap = {}
        self._pl = [nullid, nullid]
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True

    def rebuild(self, parent, allfiles, changedfiles=None):
        if changedfiles is None:
            # Rebuild entire dirstate
            changedfiles = allfiles
        lastnormaltime = self._lastnormaltime
        self.clear()
        self._lastnormaltime = lastnormaltime

        if self._origpl is None:
            self._origpl = self._pl
        self._pl = (parent, nullid)
        for f in changedfiles:
            if f in allfiles:
                self.normallookup(f)
            else:
                self.drop(f)

        self._dirty = True

    def write(self, tr):
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            dmap = self._map
            for f in self._updatedfiles:
                e = dmap.get(f)
                if e is not None and e[0] == 'n' and e[3] == now:
                    dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
                    self._nonnormalset.add(f)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')
            return

        st = self._opener(filename, "w", atomictemp=True, checkambig=True)
        self._writedirstate(st)

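    # Illustrative note (not part of the original file): the loop in write()
    # above guards against the "ambiguous timestamp" case: if a file was
    # marked normal in the same second that is about to be recorded as 'now',
    # a later size-preserving edit in that second would be invisible to
    # status().  Writing mtime -1 forces a content comparison next time.
    # Hedged sketch of the transformation applied to such an entry:
    #
    #   e = ('n', 0o644, 12, 1700000000)   # mtime == now
    #   e = (e[0], e[1], e[2], -1)         # stored as a lookup entry instead
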
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback

    def _writedirstate(self, st):
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(self._plchangecallbacks.iteritems()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st).st_mtime & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in self._map.iteritems():
                if e[0] == 'n' and e[3] == now:
                    import time # to avoid useless import
                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end # trust our estimate that the end is near now
                    break

        st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
        self._nonnormalset = nonnormalentries(self._map)
        st.close()
        self._lastnormaltime = 0
        self._dirty = self._dirtypl = False

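    # Illustrative note (not part of the original file): the delaywrite loop
    # above sleeps to the next multiple of 'delaywrite' seconds so that 'now'
    # is safely past every recorded mtime.  Worked example with assumed values:
    #
    #   delaywrite = 2
    #   clock = 103.4                              # time.time()
    #   start = int(clock) - (int(clock) % 2)      # 102
    #   end = start + 2                            # 104
    #   time.sleep(end - clock)                    # sleep 0.6s, then now = 104
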
    def _dirignore(self, f):
        if f == '.':
            return False
        if self._ignore(f):
            return True
        for p in util.finddirs(f):
            if self._ignore(p):
                return True
        return False

    def _ignorefiles(self):
        files = []
        if os.path.exists(self._join('.hgignore')):
            files.append(self._join('.hgignore'))
        for name, path in self._ui.configitems("ui"):
            if name == 'ignore' or name.startswith('ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files

    def _ignorefileandline(self, f):
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(i, self._ui.warn,
                                                sourceinfo=True)
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, 'glob')
                if kind == "subinclude":
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(self._root, '', [], [pattern],
                                   warn=self._ui.warn)
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, "")

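    # Illustrative note (not part of the original file): _ignorefileandline()
    # reports which ignore file and which line caused a path to be ignored,
    # following subinclude: entries as it goes.  Hedged usage sketch with
    # hypothetical values:
    #
    #   f, lineno, line = self._ignorefileandline('build/output.o')
    #   # e.g. ('/repo/.hgignore', 3, 'build/')
    #   # or (None, -1, '') when nothing matches
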
    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or '.' in files:
            files = ['.']
        results = dict.fromkeys(subrepos)
        results['.hg'] = None

        alldirs = None
        for ff in files:
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['.']
            if normalize and ff != '.':
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if alldirs is None:
                        alldirs = util.dirs(dmap)
                    if nf in alldirs:
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, inst.strerror)

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in results.iteritems():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.iteritems():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(path, norm, True, None,
                                                    self._dirfoldmap)
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound

    def walk(self, match, subrepos, unknown, ignored, full=True):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        '''
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = osutil.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                nd = work.pop()
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd), inst.strerror)
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
1035 # unknown file -- normalize if necessary
1036 if not alreadynormed:
1036 if not alreadynormed:
1037 nf = normalize(nf, False, True)
1037 nf = normalize(nf, False, True)
1038 results[nf] = st
1038 results[nf] = st
1039 elif nf in dmap and (matchalways or matchfn(nf)):
1039 elif nf in dmap and (matchalways or matchfn(nf)):
1040 results[nf] = None
1040 results[nf] = None
1041
1041
1042 for nd, d in work:
1042 for nd, d in work:
1043 # alreadynormed means that traverse() doesn't have to do any
1043 # alreadynormed means that traverse() doesn't have to do any
1044 # expensive directory normalization
1044 # expensive directory normalization
1045 alreadynormed = not normalize or nd == d
1045 alreadynormed = not normalize or nd == d
1046 traverse([d], alreadynormed)
1046 traverse([d], alreadynormed)
1047
1047
1048 for s in subrepos:
1048 for s in subrepos:
1049 del results[s]
1049 del results[s]
1050 del results['.hg']
1050 del results['.hg']
1051
1051
1052 # step 3: visit remaining files from dmap
1052 # step 3: visit remaining files from dmap
1053 if not skipstep3 and not exact:
1053 if not skipstep3 and not exact:
1054 # If a dmap file is not in results yet, it was either
1054 # If a dmap file is not in results yet, it was either
1055 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1055 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1056 # symlink directory.
1056 # symlink directory.
1057 if not results and matchalways:
1057 if not results and matchalways:
1058 visit = dmap.keys()
1058 visit = dmap.keys()
1059 else:
1059 else:
1060 visit = [f for f in dmap if f not in results and matchfn(f)]
1060 visit = [f for f in dmap if f not in results and matchfn(f)]
1061 visit.sort()
1061 visit.sort()
1062
1062
1063 if unknown:
1063 if unknown:
1064 # unknown == True means we walked all dirs under the roots
1064 # unknown == True means we walked all dirs under the roots
1065 # that weren't ignored, and everything that matched was stat'ed
1065 # that weren't ignored, and everything that matched was stat'ed
1066 # and is already in results.
1066 # and is already in results.
1067 # The rest must thus be ignored or under a symlink.
1067 # The rest must thus be ignored or under a symlink.
1068 audit_path = pathutil.pathauditor(self._root)
1068 audit_path = pathutil.pathauditor(self._root)
1069
1069
1070 for nf in iter(visit):
1070 for nf in iter(visit):
1071 # If a stat for the same file was already added with a
1071 # If a stat for the same file was already added with a
1072 # different case, don't add one for this, since that would
1072 # different case, don't add one for this, since that would
1073 # make it appear as if the file exists under both names
1073 # make it appear as if the file exists under both names
1074 # on disk.
1074 # on disk.
1075 if (normalizefile and
1075 if (normalizefile and
1076 normalizefile(nf, True, True) in results):
1076 normalizefile(nf, True, True) in results):
1077 results[nf] = None
1077 results[nf] = None
1078 # Report ignored items in the dmap as long as they are not
1078 # Report ignored items in the dmap as long as they are not
1079 # under a symlink directory.
1079 # under a symlink directory.
1080 elif audit_path.check(nf):
1080 elif audit_path.check(nf):
1081 try:
1081 try:
1082 results[nf] = lstat(join(nf))
1082 results[nf] = lstat(join(nf))
1083 # file was just ignored, no links, and exists
1083 # file was just ignored, no links, and exists
1084 except OSError:
1084 except OSError:
1085 # file doesn't exist
1085 # file doesn't exist
1086 results[nf] = None
1086 results[nf] = None
1087 else:
1087 else:
1088 # It's either missing or under a symlink directory
1088 # It's either missing or under a symlink directory
1089 # which, in this case, we report as missing
1089 # which, in this case, we report as missing
1090 results[nf] = None
1090 results[nf] = None
1091 else:
1091 else:
1092 # We may not have walked the full directory tree above,
1092 # We may not have walked the full directory tree above,
1093 # so stat and check everything we missed.
1093 # so stat and check everything we missed.
1094 nf = iter(visit).next
1094 nf = iter(visit).next
1095 for st in util.statfiles([join(i) for i in visit]):
1095 for st in util.statfiles([join(i) for i in visit]):
1096 results[nf()] = st
1096 results[nf()] = st
1097 return results
1097 return results
1098
1098
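The ignore/dirignore predicate selection at the top of walk() decides how much of the tree is traversed. The following is a standalone sketch of that decision only, collapsing _ignore and _dirignore into a single hgignore_match callback for brevity (the real code keeps separate file- and directory-level predicates); always/never mimic util.always and util.never.

    def pick_ignore_predicates(unknown, ignored, hgignore_match):
        always = lambda path: True    # stand-in for util.always
        never = lambda path: False    # stand-in for util.never
        if ignored:
            # listing ignored files too: nothing may be skipped
            return never, never
        elif unknown:
            # listing unknown files: honour the .hgignore rules
            return hgignore_match, hgignore_match
        else:
            # neither unknown nor ignored: directory recursion can be skipped
            return always, always

    ignore, dirignore = pick_ignore_predicates(
        unknown=True, ignored=False,
        hgignore_match=lambda p: p.endswith('.pyc'))
    print(ignore('foo.pyc'), dirignore('src'))    # True False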
1099 def status(self, match, subrepos, ignored, clean, unknown):
1099 def status(self, match, subrepos, ignored, clean, unknown):
1100 '''Determine the status of the working copy relative to the
1100 '''Determine the status of the working copy relative to the
1101 dirstate and return a pair of (unsure, status), where status is of type
1101 dirstate and return a pair of (unsure, status), where status is of type
1102 scmutil.status and:
1102 scmutil.status and:
1103
1103
1104 unsure:
1104 unsure:
1105 files that might have been modified since the dirstate was
1105 files that might have been modified since the dirstate was
1106 written, but need to be read to be sure (size is the same
1106 written, but need to be read to be sure (size is the same
1107 but mtime differs)
1107 but mtime differs)
1108 status.modified:
1108 status.modified:
1109 files that have definitely been modified since the dirstate
1109 files that have definitely been modified since the dirstate
1110 was written (different size or mode)
1110 was written (different size or mode)
1111 status.clean:
1111 status.clean:
1112 files that have definitely not been modified since the
1112 files that have definitely not been modified since the
1113 dirstate was written
1113 dirstate was written
1114 '''
1114 '''
1115 listignored, listclean, listunknown = ignored, clean, unknown
1115 listignored, listclean, listunknown = ignored, clean, unknown
1116 lookup, modified, added, unknown, ignored = [], [], [], [], []
1116 lookup, modified, added, unknown, ignored = [], [], [], [], []
1117 removed, deleted, clean = [], [], []
1117 removed, deleted, clean = [], [], []
1118
1118
1119 dmap = self._map
1119 dmap = self._map
1120 ladd = lookup.append # aka "unsure"
1120 ladd = lookup.append # aka "unsure"
1121 madd = modified.append
1121 madd = modified.append
1122 aadd = added.append
1122 aadd = added.append
1123 uadd = unknown.append
1123 uadd = unknown.append
1124 iadd = ignored.append
1124 iadd = ignored.append
1125 radd = removed.append
1125 radd = removed.append
1126 dadd = deleted.append
1126 dadd = deleted.append
1127 cadd = clean.append
1127 cadd = clean.append
1128 mexact = match.exact
1128 mexact = match.exact
1129 dirignore = self._dirignore
1129 dirignore = self._dirignore
1130 checkexec = self._checkexec
1130 checkexec = self._checkexec
1131 copymap = self._copymap
1131 copymap = self._copymap
1132 lastnormaltime = self._lastnormaltime
1132 lastnormaltime = self._lastnormaltime
1133
1133
1134 # We need to do full walks when either
1134 # We need to do full walks when either
1135 # - we're listing all clean files, or
1135 # - we're listing all clean files, or
1136 # - match.traversedir does something, because match.traversedir should
1136 # - match.traversedir does something, because match.traversedir should
1137 # be called for every dir in the working dir
1137 # be called for every dir in the working dir
1138 full = listclean or match.traversedir is not None
1138 full = listclean or match.traversedir is not None
1139 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1139 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1140 full=full).iteritems():
1140 full=full).iteritems():
1141 if fn not in dmap:
1141 if fn not in dmap:
1142 if (listignored or mexact(fn)) and dirignore(fn):
1142 if (listignored or mexact(fn)) and dirignore(fn):
1143 if listignored:
1143 if listignored:
1144 iadd(fn)
1144 iadd(fn)
1145 else:
1145 else:
1146 uadd(fn)
1146 uadd(fn)
1147 continue
1147 continue
1148
1148
1149 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1149 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1150 # written like that for performance reasons. dmap[fn] is not a
1150 # written like that for performance reasons. dmap[fn] is not a
1151 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1151 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1152 # opcode has fast paths when the value to be unpacked is a tuple or
1152 # opcode has fast paths when the value to be unpacked is a tuple or
1153 # a list, but falls back to creating a full-fledged iterator in
1153 # a list, but falls back to creating a full-fledged iterator in
1154 # general. That is much slower than simply accessing and storing the
1154 # general. That is much slower than simply accessing and storing the
1155 # tuple members one by one.
1155 # tuple members one by one.
1156 t = dmap[fn]
1156 t = dmap[fn]
1157 state = t[0]
1157 state = t[0]
1158 mode = t[1]
1158 mode = t[1]
1159 size = t[2]
1159 size = t[2]
1160 time = t[3]
1160 time = t[3]
1161
1161
1162 if not st and state in "nma":
1162 if not st and state in "nma":
1163 dadd(fn)
1163 dadd(fn)
1164 elif state == 'n':
1164 elif state == 'n':
1165 if (size >= 0 and
1165 if (size >= 0 and
1166 ((size != st.st_size and size != st.st_size & _rangemask)
1166 ((size != st.st_size and size != st.st_size & _rangemask)
1167 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1167 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1168 or size == -2 # other parent
1168 or size == -2 # other parent
1169 or fn in copymap):
1169 or fn in copymap):
1170 madd(fn)
1170 madd(fn)
1171 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1171 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1172 ladd(fn)
1172 ladd(fn)
1173 elif st.st_mtime == lastnormaltime:
1173 elif st.st_mtime == lastnormaltime:
1174 # fn may have just been marked as normal and it may have
1174 # fn may have just been marked as normal and it may have
1175 # changed in the same second without changing its size.
1175 # changed in the same second without changing its size.
1176 # This can happen if we quickly do multiple commits.
1176 # This can happen if we quickly do multiple commits.
1177 # Force lookup, so we don't miss such a racy file change.
1177 # Force lookup, so we don't miss such a racy file change.
1178 ladd(fn)
1178 ladd(fn)
1179 elif listclean:
1179 elif listclean:
1180 cadd(fn)
1180 cadd(fn)
1181 elif state == 'm':
1181 elif state == 'm':
1182 madd(fn)
1182 madd(fn)
1183 elif state == 'a':
1183 elif state == 'a':
1184 aadd(fn)
1184 aadd(fn)
1185 elif state == 'r':
1185 elif state == 'r':
1186 radd(fn)
1186 radd(fn)
1187
1187
1188 return (lookup, scmutil.status(modified, added, removed, deleted,
1188 return (lookup, scmutil.status(modified, added, removed, deleted,
1189 unknown, ignored, clean))
1189 unknown, ignored, clean))
1190
1190
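The state == 'n' branch above is the heart of status(): a size or exec-bit change means "modified", while an mtime mismatch (or an mtime equal to the racy lastnormaltime) only means "unsure". A self-contained sketch of that classification follows; _rangemask is the 31-bit mask defined at the top of this file, st is any object with st_size/st_mode/st_mtime attributes, and the listclean short-circuit is omitted.

    _rangemask = 0x7fffffff

    def classify_normal(mode, size, mtime, st, checkexec=True,
                        copied=False, lastnormaltime=-1):
        if (size >= 0 and
            ((size != st.st_size and size != st.st_size & _rangemask)
             or ((mode ^ st.st_mode) & 0o100 and checkexec))
            or size == -2               # entry came from the other parent
            or copied):
            return 'modified'
        elif mtime != st.st_mtime and mtime != st.st_mtime & _rangemask:
            return 'lookup'             # same size, different mtime: unsure
        elif st.st_mtime == lastnormaltime:
            return 'lookup'             # racy write in the same second
        return 'clean'

    class _St(object):
        st_size, st_mode, st_mtime = 5, 0o100644, 1000

    print(classify_normal(0o100644, 5, 1000, _St()))    # clean
    print(classify_normal(0o100644, 7, 1000, _St()))    # modified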
1191 def matches(self, match):
1191 def matches(self, match):
1192 '''
1192 '''
1193 return files in the dirstate (in whatever state) filtered by match
1193 return files in the dirstate (in whatever state) filtered by match
1194 '''
1194 '''
1195 dmap = self._map
1195 dmap = self._map
1196 if match.always():
1196 if match.always():
1197 return dmap.keys()
1197 return dmap.keys()
1198 files = match.files()
1198 files = match.files()
1199 if match.isexact():
1199 if match.isexact():
1200 # fast path -- filter the other way around, since typically files is
1200 # fast path -- filter the other way around, since typically files is
1201 # much smaller than dmap
1201 # much smaller than dmap
1202 return [f for f in files if f in dmap]
1202 return [f for f in files if f in dmap]
1203 if match.prefix() and all(fn in dmap for fn in files):
1203 if match.prefix() and all(fn in dmap for fn in files):
1204 # fast path -- all the values are known to be files, so just return
1204 # fast path -- all the values are known to be files, so just return
1205 # that
1205 # that
1206 return list(files)
1206 return list(files)
1207 return [f for f in dmap if match(f)]
1207 return [f for f in dmap if match(f)]
1208
1208
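A standalone restatement of the three fast paths in matches() above, using a plain dict for dmap and booleans in place of the matcher's always()/isexact()/prefix() queries; the parameter names are illustrative only.

    def matches(dmap, files, matchfn, always=False, isexact=False,
                isprefix=False):
        if always:
            return list(dmap)
        if isexact:
            # exact matcher: 'files' is usually tiny, so filter it against dmap
            return [f for f in files if f in dmap]
        if isprefix and all(f in dmap for f in files):
            # prefix matcher whose roots are all tracked files: nothing to scan
            return list(files)
        return [f for f in dmap if matchfn(f)]

    dmap = {'a.py': None, 'b.py': None, 'docs/x.txt': None}
    print(matches(dmap, ['a.py'], None, isexact=True))             # ['a.py']
    print(sorted(matches(dmap, [], lambda f: f.endswith('.py'))))  # a.py, b.py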
1209 def _actualfilename(self, tr):
1209 def _actualfilename(self, tr):
1210 if tr:
1210 if tr:
1211 return self._pendingfilename
1211 return self._pendingfilename
1212 else:
1212 else:
1213 return self._filename
1213 return self._filename
1214
1214
1215 def savebackup(self, tr, suffix='', prefix=''):
1215 def savebackup(self, tr, suffix='', prefix=''):
1216 '''Save the current dirstate into a backup file with the given prefix/suffix'''
1216 '''Save the current dirstate into a backup file with the given prefix/suffix'''
1217 assert len(suffix) > 0 or len(prefix) > 0
1217 assert len(suffix) > 0 or len(prefix) > 0
1218 filename = self._actualfilename(tr)
1218 filename = self._actualfilename(tr)
1219
1219
1220 # use '_writedirstate' instead of 'write' to make sure changes are written
1220 # use '_writedirstate' instead of 'write' to make sure changes are written
1221 # out, because the latter skips writing while a transaction is running.
1221 # out, because the latter skips writing while a transaction is running.
1222 # The output file is used to create the backup of the dirstate at this point.
1222 # The output file is used to create the backup of the dirstate at this point.
1223 if self._dirty or not self._opener.exists(filename):
1223 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1224 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1224 checkambig=True))
1225 checkambig=True))
1225
1226
1226 if tr:
1227 if tr:
1227 # ensure that subsequent tr.writepending returns True for
1228 # ensure that subsequent tr.writepending returns True for
1228 # changes written out above, even if dirstate is never
1229 # changes written out above, even if dirstate is never
1229 # changed after this
1230 # changed after this
1230 tr.addfilegenerator('dirstate', (self._filename,),
1231 tr.addfilegenerator('dirstate', (self._filename,),
1231 self._writedirstate, location='plain')
1232 self._writedirstate, location='plain')
1232
1233
1233 # ensure that pending file written above is unlinked at
1234 # ensure that pending file written above is unlinked at
1234 # failure, even if tr.writepending isn't invoked until the
1235 # failure, even if tr.writepending isn't invoked until the
1235 # end of this transaction
1236 # end of this transaction
1236 tr.registertmp(filename, location='plain')
1237 tr.registertmp(filename, location='plain')
1237
1238
1238 backupname = prefix + self._filename + suffix
1239 backupname = prefix + self._filename + suffix
1239 assert backupname != filename
1240 assert backupname != filename
1240 if self._opener.exists(backupname):
1241 if self._opener.exists(backupname):
1241 self._opener.unlink(backupname)
1242 self._opener.unlink(backupname)
1242 # hardlink backup is okay because _writedirstate is always called
1243 # hardlink backup is okay because _writedirstate is always called
1243 # with an "atomictemp=True" file.
1244 # with an "atomictemp=True" file.
1244 util.copyfile(self._opener.join(filename),
1245 util.copyfile(self._opener.join(filename),
1245 self._opener.join(backupname), hardlink=True)
1246 self._opener.join(backupname), hardlink=True)
1246
1247
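The guard added above is what this changeset is about: the dirstate is only serialized when it is dirty or missing on disk, and the backup is then taken as a hardlink where possible. A hedged sketch of that pattern with plain files; the file names and the write_fn callback are illustrative, not Mercurial API, and the hardlink is only safe because later writes replace the file atomically rather than modifying it in place.

    import os, shutil

    def save_backup(path, backup_path, dirty, write_fn):
        if dirty or not os.path.exists(path):
            write_fn(path)                       # only dump when needed
        if os.path.exists(backup_path):
            os.unlink(backup_path)
        try:
            os.link(path, backup_path)           # cheap hardlink backup
        except (AttributeError, OSError):
            shutil.copyfile(path, backup_path)   # fall back to a real copy

    save_backup('dirstate.example', 'dirstate.example.backup', dirty=False,
                write_fn=lambda p: open(p, 'wb').close())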
1247 def restorebackup(self, tr, suffix='', prefix=''):
1248 def restorebackup(self, tr, suffix='', prefix=''):
1248 '''Restore the dirstate from the backup file with the given prefix/suffix'''
1249 '''Restore the dirstate from the backup file with the given prefix/suffix'''
1249 assert len(suffix) > 0 or len(prefix) > 0
1250 assert len(suffix) > 0 or len(prefix) > 0
1250 # this "invalidate()" prevents "wlock.release()" from writing
1251 # this "invalidate()" prevents "wlock.release()" from writing
1251 # changes of dirstate out after restoring from backup file
1252 # changes of dirstate out after restoring from backup file
1252 self.invalidate()
1253 self.invalidate()
1253 filename = self._actualfilename(tr)
1254 filename = self._actualfilename(tr)
1254 # using self._filename to avoid having "pending" in the backup filename
1255 # using self._filename to avoid having "pending" in the backup filename
1255 self._opener.rename(prefix + self._filename + suffix, filename,
1256 self._opener.rename(prefix + self._filename + suffix, filename,
1256 checkambig=True)
1257 checkambig=True)
1257
1258
1258 def clearbackup(self, tr, suffix='', prefix=''):
1259 def clearbackup(self, tr, suffix='', prefix=''):
1259 '''Remove the backup file with the given prefix/suffix'''
1260 '''Remove the backup file with the given prefix/suffix'''
1260 assert len(suffix) > 0 or len(prefix) > 0
1261 assert len(suffix) > 0 or len(prefix) > 0
1261 # using self._filename to avoid having "pending" in the backup filename
1262 # using self._filename to avoid having "pending" in the backup filename
1262 self._opener.unlink(prefix + self._filename + suffix)
1263 self._opener.unlink(prefix + self._filename + suffix)
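Taken together, savebackup/restorebackup/clearbackup form a small save-try-restore protocol for callers that mutate the working copy. A hedged usage sketch follows, with a dummy object standing in for a real dirstate; real callers pass the active transaction (or None) and repo-specific prefixes/suffixes rather than the example values used here.

    def with_dirstate_backup(dirstate, operation, suffix='.example-backup'):
        dirstate.savebackup(None, suffix=suffix)
        try:
            result = operation()
        except Exception:
            dirstate.restorebackup(None, suffix=suffix)   # roll back on failure
            raise
        dirstate.clearbackup(None, suffix=suffix)         # success: drop backup
        return result

    class _FakeDirstate(object):
        # same three-method surface as above, print-only for the sketch
        def savebackup(self, tr, suffix='', prefix=''):
            print('saved%s' % suffix)
        def restorebackup(self, tr, suffix='', prefix=''):
            print('restored%s' % suffix)
        def clearbackup(self, tr, suffix='', prefix=''):
            print('cleared%s' % suffix)

    print(with_dirstate_backup(_FakeDirstate(), operation=lambda: 'ok'))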