dirstate: track updated files to improve write time...
Durham Goode
r31206:49e5491e default
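The diff below threads a new `self._updatedfiles` set through the dirstate: every mutation (`_addpath`, `_droppath`, `copy`, `invalidate`, `clear`) records the affected filename, and `write()` then only re-checks those entries for ambiguous timestamps instead of iterating over the entire `_map`. As a rough illustration of the idea, here is a minimal standalone sketch; the names (MiniDirstate, entries, mark_normal) are made up for the example and are not Mercurial's actual classes or API:

    import time

    class MiniDirstate(object):
        """Toy model of the optimization in this changeset: mutations record
        the affected filenames in _updatedfiles so that write() only has to
        re-check those entries for ambiguous timestamps, instead of scanning
        the whole map."""

        def __init__(self):
            self.entries = {}           # filename -> (state, mtime)
            self._updatedfiles = set()  # files touched since the last write

        def mark_normal(self, fname):
            mtime = int(time.time())
            self.entries[fname] = ('n', mtime)
            self._updatedfiles.add(fname)

        def write(self, now):
            # Only entries touched in this session can carry an ambiguous
            # timestamp (mtime == now), so the fix-up loop is O(updated files)
            # rather than O(all tracked files).
            for fname in self._updatedfiles:
                entry = self.entries.get(fname)
                if entry is not None and entry[0] == 'n' and entry[1] == now:
                    self.entries[fname] = ('n', -1)  # force a later re-check
            self._updatedfiles.clear()
            # ... serialize self.entries to disk here ...

In a large working copy the dirstate map can hold far more entries than a typical command touches, so restricting the timestamp fix-up loop in `write()` to the touched set is what improves write time.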
@@ -1,1245 +1,1256 @@
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import errno
11 import errno
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import nullid
16 from .node import nullid
17 from . import (
17 from . import (
18 encoding,
18 encoding,
19 error,
19 error,
20 match as matchmod,
20 match as matchmod,
21 osutil,
21 osutil,
22 parsers,
22 parsers,
23 pathutil,
23 pathutil,
24 pycompat,
24 pycompat,
25 scmutil,
25 scmutil,
26 txnutil,
26 txnutil,
27 util,
27 util,
28 )
28 )
29
29
30 propertycache = util.propertycache
30 propertycache = util.propertycache
31 filecache = scmutil.filecache
31 filecache = scmutil.filecache
32 _rangemask = 0x7fffffff
32 _rangemask = 0x7fffffff
33
33
34 dirstatetuple = parsers.dirstatetuple
34 dirstatetuple = parsers.dirstatetuple
35
35
36 class repocache(filecache):
36 class repocache(filecache):
37 """filecache for files in .hg/"""
37 """filecache for files in .hg/"""
38 def join(self, obj, fname):
38 def join(self, obj, fname):
39 return obj._opener.join(fname)
39 return obj._opener.join(fname)
40
40
41 class rootcache(filecache):
41 class rootcache(filecache):
42 """filecache for files in the repository root"""
42 """filecache for files in the repository root"""
43 def join(self, obj, fname):
43 def join(self, obj, fname):
44 return obj._join(fname)
44 return obj._join(fname)
45
45
46 def _getfsnow(vfs):
46 def _getfsnow(vfs):
47 '''Get "now" timestamp on filesystem'''
47 '''Get "now" timestamp on filesystem'''
48 tmpfd, tmpname = vfs.mkstemp()
48 tmpfd, tmpname = vfs.mkstemp()
49 try:
49 try:
50 return os.fstat(tmpfd).st_mtime
50 return os.fstat(tmpfd).st_mtime
51 finally:
51 finally:
52 os.close(tmpfd)
52 os.close(tmpfd)
53 vfs.unlink(tmpname)
53 vfs.unlink(tmpname)
54
54
55 def nonnormalentries(dmap):
55 def nonnormalentries(dmap):
56 '''Compute the nonnormal dirstate entries from the dmap'''
56 '''Compute the nonnormal dirstate entries from the dmap'''
57 try:
57 try:
58 return parsers.nonnormalentries(dmap)
58 return parsers.nonnormalentries(dmap)
59 except AttributeError:
59 except AttributeError:
60 return set(fname for fname, e in dmap.iteritems()
60 return set(fname for fname, e in dmap.iteritems()
61 if e[0] != 'n' or e[3] == -1)
61 if e[0] != 'n' or e[3] == -1)
62
62
63 class dirstate(object):
63 class dirstate(object):
64
64
65 def __init__(self, opener, ui, root, validate):
65 def __init__(self, opener, ui, root, validate):
66 '''Create a new dirstate object.
66 '''Create a new dirstate object.
67
67
68 opener is an open()-like callable that can be used to open the
68 opener is an open()-like callable that can be used to open the
69 dirstate file; root is the root of the directory tracked by
69 dirstate file; root is the root of the directory tracked by
70 the dirstate.
70 the dirstate.
71 '''
71 '''
72 self._opener = opener
72 self._opener = opener
73 self._validate = validate
73 self._validate = validate
74 self._root = root
74 self._root = root
75 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
75 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
76 # UNC path pointing to root share (issue4557)
76 # UNC path pointing to root share (issue4557)
77 self._rootdir = pathutil.normasprefix(root)
77 self._rootdir = pathutil.normasprefix(root)
78 # internal config: ui.forcecwd
78 # internal config: ui.forcecwd
79 forcecwd = ui.config('ui', 'forcecwd')
79 forcecwd = ui.config('ui', 'forcecwd')
80 if forcecwd:
80 if forcecwd:
81 self._cwd = forcecwd
81 self._cwd = forcecwd
82 self._dirty = False
82 self._dirty = False
83 self._dirtypl = False
83 self._dirtypl = False
84 self._lastnormaltime = 0
84 self._lastnormaltime = 0
85 self._ui = ui
85 self._ui = ui
86 self._filecache = {}
86 self._filecache = {}
87 self._parentwriters = 0
87 self._parentwriters = 0
88 self._filename = 'dirstate'
88 self._filename = 'dirstate'
89 self._pendingfilename = '%s.pending' % self._filename
89 self._pendingfilename = '%s.pending' % self._filename
90 self._plchangecallbacks = {}
90 self._plchangecallbacks = {}
91 self._origpl = None
91 self._origpl = None
92 self._updatedfiles = set()
92
93
93 # for consistent view between _pl() and _read() invocations
94 # for consistent view between _pl() and _read() invocations
94 self._pendingmode = None
95 self._pendingmode = None
95
96
96 def beginparentchange(self):
97 def beginparentchange(self):
97 '''Marks the beginning of a set of changes that involve changing
98 '''Marks the beginning of a set of changes that involve changing
98 the dirstate parents. If there is an exception during this time,
99 the dirstate parents. If there is an exception during this time,
99 the dirstate will not be written when the wlock is released. This
100 the dirstate will not be written when the wlock is released. This
100 prevents writing an incoherent dirstate where the parent doesn't
101 prevents writing an incoherent dirstate where the parent doesn't
101 match the contents.
102 match the contents.
102 '''
103 '''
103 self._parentwriters += 1
104 self._parentwriters += 1
104
105
105 def endparentchange(self):
106 def endparentchange(self):
106 '''Marks the end of a set of changes that involve changing the
107 '''Marks the end of a set of changes that involve changing the
107 dirstate parents. Once all parent changes have been marked done,
108 dirstate parents. Once all parent changes have been marked done,
108 the wlock will be free to write the dirstate on release.
109 the wlock will be free to write the dirstate on release.
109 '''
110 '''
110 if self._parentwriters > 0:
111 if self._parentwriters > 0:
111 self._parentwriters -= 1
112 self._parentwriters -= 1
112
113
113 def pendingparentchange(self):
114 def pendingparentchange(self):
114 '''Returns true if the dirstate is in the middle of a set of changes
115 '''Returns true if the dirstate is in the middle of a set of changes
115 that modify the dirstate parent.
116 that modify the dirstate parent.
116 '''
117 '''
117 return self._parentwriters > 0
118 return self._parentwriters > 0
118
119
119 @propertycache
120 @propertycache
120 def _map(self):
121 def _map(self):
121 '''Return the dirstate contents as a map from filename to
122 '''Return the dirstate contents as a map from filename to
122 (state, mode, size, time).'''
123 (state, mode, size, time).'''
123 self._read()
124 self._read()
124 return self._map
125 return self._map
125
126
126 @propertycache
127 @propertycache
127 def _copymap(self):
128 def _copymap(self):
128 self._read()
129 self._read()
129 return self._copymap
130 return self._copymap
130
131
131 @propertycache
132 @propertycache
132 def _nonnormalset(self):
133 def _nonnormalset(self):
133 return nonnormalentries(self._map)
134 return nonnormalentries(self._map)
134
135
135 @propertycache
136 @propertycache
136 def _filefoldmap(self):
137 def _filefoldmap(self):
137 try:
138 try:
138 makefilefoldmap = parsers.make_file_foldmap
139 makefilefoldmap = parsers.make_file_foldmap
139 except AttributeError:
140 except AttributeError:
140 pass
141 pass
141 else:
142 else:
142 return makefilefoldmap(self._map, util.normcasespec,
143 return makefilefoldmap(self._map, util.normcasespec,
143 util.normcasefallback)
144 util.normcasefallback)
144
145
145 f = {}
146 f = {}
146 normcase = util.normcase
147 normcase = util.normcase
147 for name, s in self._map.iteritems():
148 for name, s in self._map.iteritems():
148 if s[0] != 'r':
149 if s[0] != 'r':
149 f[normcase(name)] = name
150 f[normcase(name)] = name
150 f['.'] = '.' # prevents useless util.fspath() invocation
151 f['.'] = '.' # prevents useless util.fspath() invocation
151 return f
152 return f
152
153
153 @propertycache
154 @propertycache
154 def _dirfoldmap(self):
155 def _dirfoldmap(self):
155 f = {}
156 f = {}
156 normcase = util.normcase
157 normcase = util.normcase
157 for name in self._dirs:
158 for name in self._dirs:
158 f[normcase(name)] = name
159 f[normcase(name)] = name
159 return f
160 return f
160
161
161 @repocache('branch')
162 @repocache('branch')
162 def _branch(self):
163 def _branch(self):
163 try:
164 try:
164 return self._opener.read("branch").strip() or "default"
165 return self._opener.read("branch").strip() or "default"
165 except IOError as inst:
166 except IOError as inst:
166 if inst.errno != errno.ENOENT:
167 if inst.errno != errno.ENOENT:
167 raise
168 raise
168 return "default"
169 return "default"
169
170
170 @propertycache
171 @propertycache
171 def _pl(self):
172 def _pl(self):
172 try:
173 try:
173 fp = self._opendirstatefile()
174 fp = self._opendirstatefile()
174 st = fp.read(40)
175 st = fp.read(40)
175 fp.close()
176 fp.close()
176 l = len(st)
177 l = len(st)
177 if l == 40:
178 if l == 40:
178 return st[:20], st[20:40]
179 return st[:20], st[20:40]
179 elif l > 0 and l < 40:
180 elif l > 0 and l < 40:
180 raise error.Abort(_('working directory state appears damaged!'))
181 raise error.Abort(_('working directory state appears damaged!'))
181 except IOError as err:
182 except IOError as err:
182 if err.errno != errno.ENOENT:
183 if err.errno != errno.ENOENT:
183 raise
184 raise
184 return [nullid, nullid]
185 return [nullid, nullid]
185
186
186 @propertycache
187 @propertycache
187 def _dirs(self):
188 def _dirs(self):
188 return util.dirs(self._map, 'r')
189 return util.dirs(self._map, 'r')
189
190
190 def dirs(self):
191 def dirs(self):
191 return self._dirs
192 return self._dirs
192
193
193 @rootcache('.hgignore')
194 @rootcache('.hgignore')
194 def _ignore(self):
195 def _ignore(self):
195 files = self._ignorefiles()
196 files = self._ignorefiles()
196 if not files:
197 if not files:
197 return util.never
198 return util.never
198
199
199 pats = ['include:%s' % f for f in files]
200 pats = ['include:%s' % f for f in files]
200 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
201 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
201
202
202 @propertycache
203 @propertycache
203 def _slash(self):
204 def _slash(self):
204 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
205 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
205
206
206 @propertycache
207 @propertycache
207 def _checklink(self):
208 def _checklink(self):
208 return util.checklink(self._root)
209 return util.checklink(self._root)
209
210
210 @propertycache
211 @propertycache
211 def _checkexec(self):
212 def _checkexec(self):
212 return util.checkexec(self._root)
213 return util.checkexec(self._root)
213
214
214 @propertycache
215 @propertycache
215 def _checkcase(self):
216 def _checkcase(self):
216 return not util.fscasesensitive(self._join('.hg'))
217 return not util.fscasesensitive(self._join('.hg'))
217
218
218 def _join(self, f):
219 def _join(self, f):
219 # much faster than os.path.join()
220 # much faster than os.path.join()
220 # it's safe because f is always a relative path
221 # it's safe because f is always a relative path
221 return self._rootdir + f
222 return self._rootdir + f
222
223
223 def flagfunc(self, buildfallback):
224 def flagfunc(self, buildfallback):
224 if self._checklink and self._checkexec:
225 if self._checklink and self._checkexec:
225 def f(x):
226 def f(x):
226 try:
227 try:
227 st = os.lstat(self._join(x))
228 st = os.lstat(self._join(x))
228 if util.statislink(st):
229 if util.statislink(st):
229 return 'l'
230 return 'l'
230 if util.statisexec(st):
231 if util.statisexec(st):
231 return 'x'
232 return 'x'
232 except OSError:
233 except OSError:
233 pass
234 pass
234 return ''
235 return ''
235 return f
236 return f
236
237
237 fallback = buildfallback()
238 fallback = buildfallback()
238 if self._checklink:
239 if self._checklink:
239 def f(x):
240 def f(x):
240 if os.path.islink(self._join(x)):
241 if os.path.islink(self._join(x)):
241 return 'l'
242 return 'l'
242 if 'x' in fallback(x):
243 if 'x' in fallback(x):
243 return 'x'
244 return 'x'
244 return ''
245 return ''
245 return f
246 return f
246 if self._checkexec:
247 if self._checkexec:
247 def f(x):
248 def f(x):
248 if 'l' in fallback(x):
249 if 'l' in fallback(x):
249 return 'l'
250 return 'l'
250 if util.isexec(self._join(x)):
251 if util.isexec(self._join(x)):
251 return 'x'
252 return 'x'
252 return ''
253 return ''
253 return f
254 return f
254 else:
255 else:
255 return fallback
256 return fallback
256
257
257 @propertycache
258 @propertycache
258 def _cwd(self):
259 def _cwd(self):
259 return pycompat.getcwd()
260 return pycompat.getcwd()
260
261
261 def getcwd(self):
262 def getcwd(self):
262 '''Return the path from which a canonical path is calculated.
263 '''Return the path from which a canonical path is calculated.
263
264
264 This path should be used to resolve file patterns or to convert
265 This path should be used to resolve file patterns or to convert
265 canonical paths back to file paths for display. It shouldn't be
266 canonical paths back to file paths for display. It shouldn't be
266 used to get real file paths. Use vfs functions instead.
267 used to get real file paths. Use vfs functions instead.
267 '''
268 '''
268 cwd = self._cwd
269 cwd = self._cwd
269 if cwd == self._root:
270 if cwd == self._root:
270 return ''
271 return ''
271 # self._root ends with a path separator if self._root is '/' or 'C:\'
272 # self._root ends with a path separator if self._root is '/' or 'C:\'
272 rootsep = self._root
273 rootsep = self._root
273 if not util.endswithsep(rootsep):
274 if not util.endswithsep(rootsep):
274 rootsep += pycompat.ossep
275 rootsep += pycompat.ossep
275 if cwd.startswith(rootsep):
276 if cwd.startswith(rootsep):
276 return cwd[len(rootsep):]
277 return cwd[len(rootsep):]
277 else:
278 else:
278 # we're outside the repo. return an absolute path.
279 # we're outside the repo. return an absolute path.
279 return cwd
280 return cwd
280
281
281 def pathto(self, f, cwd=None):
282 def pathto(self, f, cwd=None):
282 if cwd is None:
283 if cwd is None:
283 cwd = self.getcwd()
284 cwd = self.getcwd()
284 path = util.pathto(self._root, cwd, f)
285 path = util.pathto(self._root, cwd, f)
285 if self._slash:
286 if self._slash:
286 return util.pconvert(path)
287 return util.pconvert(path)
287 return path
288 return path
288
289
289 def __getitem__(self, key):
290 def __getitem__(self, key):
290 '''Return the current state of key (a filename) in the dirstate.
291 '''Return the current state of key (a filename) in the dirstate.
291
292
292 States are:
293 States are:
293 n normal
294 n normal
294 m needs merging
295 m needs merging
295 r marked for removal
296 r marked for removal
296 a marked for addition
297 a marked for addition
297 ? not tracked
298 ? not tracked
298 '''
299 '''
299 return self._map.get(key, ("?",))[0]
300 return self._map.get(key, ("?",))[0]
300
301
301 def __contains__(self, key):
302 def __contains__(self, key):
302 return key in self._map
303 return key in self._map
303
304
304 def __iter__(self):
305 def __iter__(self):
305 for x in sorted(self._map):
306 for x in sorted(self._map):
306 yield x
307 yield x
307
308
308 def iteritems(self):
309 def iteritems(self):
309 return self._map.iteritems()
310 return self._map.iteritems()
310
311
311 def parents(self):
312 def parents(self):
312 return [self._validate(p) for p in self._pl]
313 return [self._validate(p) for p in self._pl]
313
314
314 def p1(self):
315 def p1(self):
315 return self._validate(self._pl[0])
316 return self._validate(self._pl[0])
316
317
317 def p2(self):
318 def p2(self):
318 return self._validate(self._pl[1])
319 return self._validate(self._pl[1])
319
320
320 def branch(self):
321 def branch(self):
321 return encoding.tolocal(self._branch)
322 return encoding.tolocal(self._branch)
322
323
323 def setparents(self, p1, p2=nullid):
324 def setparents(self, p1, p2=nullid):
324 """Set dirstate parents to p1 and p2.
325 """Set dirstate parents to p1 and p2.
325
326
326 When moving from two parents to one, 'm' merged entries a
327 When moving from two parents to one, 'm' merged entries a
327 adjusted to normal and previous copy records discarded and
328 adjusted to normal and previous copy records discarded and
328 returned by the call.
329 returned by the call.
329
330
330 See localrepo.setparents()
331 See localrepo.setparents()
331 """
332 """
332 if self._parentwriters == 0:
333 if self._parentwriters == 0:
333 raise ValueError("cannot set dirstate parent without "
334 raise ValueError("cannot set dirstate parent without "
334 "calling dirstate.beginparentchange")
335 "calling dirstate.beginparentchange")
335
336
336 self._dirty = self._dirtypl = True
337 self._dirty = self._dirtypl = True
337 oldp2 = self._pl[1]
338 oldp2 = self._pl[1]
338 if self._origpl is None:
339 if self._origpl is None:
339 self._origpl = self._pl
340 self._origpl = self._pl
340 self._pl = p1, p2
341 self._pl = p1, p2
341 copies = {}
342 copies = {}
342 if oldp2 != nullid and p2 == nullid:
343 if oldp2 != nullid and p2 == nullid:
343 for f, s in self._map.iteritems():
344 for f, s in self._map.iteritems():
344 # Discard 'm' markers when moving away from a merge state
345 # Discard 'm' markers when moving away from a merge state
345 if s[0] == 'm':
346 if s[0] == 'm':
346 if f in self._copymap:
347 if f in self._copymap:
347 copies[f] = self._copymap[f]
348 copies[f] = self._copymap[f]
348 self.normallookup(f)
349 self.normallookup(f)
349 # Also fix up otherparent markers
350 # Also fix up otherparent markers
350 elif s[0] == 'n' and s[2] == -2:
351 elif s[0] == 'n' and s[2] == -2:
351 if f in self._copymap:
352 if f in self._copymap:
352 copies[f] = self._copymap[f]
353 copies[f] = self._copymap[f]
353 self.add(f)
354 self.add(f)
354 return copies
355 return copies
355
356
356 def setbranch(self, branch):
357 def setbranch(self, branch):
357 self._branch = encoding.fromlocal(branch)
358 self._branch = encoding.fromlocal(branch)
358 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
359 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
359 try:
360 try:
360 f.write(self._branch + '\n')
361 f.write(self._branch + '\n')
361 f.close()
362 f.close()
362
363
363 # make sure filecache has the correct stat info for _branch after
364 # make sure filecache has the correct stat info for _branch after
364 # replacing the underlying file
365 # replacing the underlying file
365 ce = self._filecache['_branch']
366 ce = self._filecache['_branch']
366 if ce:
367 if ce:
367 ce.refresh()
368 ce.refresh()
368 except: # re-raises
369 except: # re-raises
369 f.discard()
370 f.discard()
370 raise
371 raise
371
372
372 def _opendirstatefile(self):
373 def _opendirstatefile(self):
373 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
374 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
374 if self._pendingmode is not None and self._pendingmode != mode:
375 if self._pendingmode is not None and self._pendingmode != mode:
375 fp.close()
376 fp.close()
376 raise error.Abort(_('working directory state may be '
377 raise error.Abort(_('working directory state may be '
377 'changed parallelly'))
378 'changed parallelly'))
378 self._pendingmode = mode
379 self._pendingmode = mode
379 return fp
380 return fp
380
381
381 def _read(self):
382 def _read(self):
382 self._map = {}
383 self._map = {}
383 self._copymap = {}
384 self._copymap = {}
384 try:
385 try:
385 fp = self._opendirstatefile()
386 fp = self._opendirstatefile()
386 try:
387 try:
387 st = fp.read()
388 st = fp.read()
388 finally:
389 finally:
389 fp.close()
390 fp.close()
390 except IOError as err:
391 except IOError as err:
391 if err.errno != errno.ENOENT:
392 if err.errno != errno.ENOENT:
392 raise
393 raise
393 return
394 return
394 if not st:
395 if not st:
395 return
396 return
396
397
397 if util.safehasattr(parsers, 'dict_new_presized'):
398 if util.safehasattr(parsers, 'dict_new_presized'):
398 # Make an estimate of the number of files in the dirstate based on
399 # Make an estimate of the number of files in the dirstate based on
399 # its size. From a linear regression on a set of real-world repos,
400 # its size. From a linear regression on a set of real-world repos,
400 # all over 10,000 files, the size of a dirstate entry is 85
401 # all over 10,000 files, the size of a dirstate entry is 85
401 # bytes. The cost of resizing is significantly higher than the cost
402 # bytes. The cost of resizing is significantly higher than the cost
402 # of filling in a larger presized dict, so subtract 20% from the
403 # of filling in a larger presized dict, so subtract 20% from the
403 # size.
404 # size.
404 #
405 #
405 # This heuristic is imperfect in many ways, so in a future dirstate
406 # This heuristic is imperfect in many ways, so in a future dirstate
406 # format update it makes sense to just record the number of entries
407 # format update it makes sense to just record the number of entries
407 # on write.
408 # on write.
408 self._map = parsers.dict_new_presized(len(st) / 71)
409 self._map = parsers.dict_new_presized(len(st) / 71)
409
410
410 # Python's garbage collector triggers a GC each time a certain number
411 # Python's garbage collector triggers a GC each time a certain number
411 # of container objects (the number being defined by
412 # of container objects (the number being defined by
412 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
413 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
413 # for each file in the dirstate. The C version then immediately marks
414 # for each file in the dirstate. The C version then immediately marks
414 # them as not to be tracked by the collector. However, this has no
415 # them as not to be tracked by the collector. However, this has no
415 # effect on when GCs are triggered, only on what objects the GC looks
416 # effect on when GCs are triggered, only on what objects the GC looks
416 # into. This means that O(number of files) GCs are unavoidable.
417 # into. This means that O(number of files) GCs are unavoidable.
417 # Depending on when in the process's lifetime the dirstate is parsed,
418 # Depending on when in the process's lifetime the dirstate is parsed,
418 # this can get very expensive. As a workaround, disable GC while
419 # this can get very expensive. As a workaround, disable GC while
419 # parsing the dirstate.
420 # parsing the dirstate.
420 #
421 #
421 # (we cannot decorate the function directly since it is in a C module)
422 # (we cannot decorate the function directly since it is in a C module)
422 parse_dirstate = util.nogc(parsers.parse_dirstate)
423 parse_dirstate = util.nogc(parsers.parse_dirstate)
423 p = parse_dirstate(self._map, self._copymap, st)
424 p = parse_dirstate(self._map, self._copymap, st)
424 if not self._dirtypl:
425 if not self._dirtypl:
425 self._pl = p
426 self._pl = p
426
427
427 def invalidate(self):
428 def invalidate(self):
428 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
429 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
429 "_pl", "_dirs", "_ignore", "_nonnormalset"):
430 "_pl", "_dirs", "_ignore", "_nonnormalset"):
430 if a in self.__dict__:
431 if a in self.__dict__:
431 delattr(self, a)
432 delattr(self, a)
432 self._lastnormaltime = 0
433 self._lastnormaltime = 0
433 self._dirty = False
434 self._dirty = False
435 self._updatedfiles.clear()
434 self._parentwriters = 0
436 self._parentwriters = 0
435 self._origpl = None
437 self._origpl = None
436
438
437 def copy(self, source, dest):
439 def copy(self, source, dest):
438 """Mark dest as a copy of source. Unmark dest if source is None."""
440 """Mark dest as a copy of source. Unmark dest if source is None."""
439 if source == dest:
441 if source == dest:
440 return
442 return
441 self._dirty = True
443 self._dirty = True
442 if source is not None:
444 if source is not None:
443 self._copymap[dest] = source
445 self._copymap[dest] = source
446 self._updatedfiles.add(source)
447 self._updatedfiles.add(dest)
444 elif dest in self._copymap:
448 elif dest in self._copymap:
445 del self._copymap[dest]
449 del self._copymap[dest]
450 self._updatedfiles.add(dest)
446
451
447 def copied(self, file):
452 def copied(self, file):
448 return self._copymap.get(file, None)
453 return self._copymap.get(file, None)
449
454
450 def copies(self):
455 def copies(self):
451 return self._copymap
456 return self._copymap
452
457
453 def _droppath(self, f):
458 def _droppath(self, f):
454 if self[f] not in "?r" and "_dirs" in self.__dict__:
459 if self[f] not in "?r" and "_dirs" in self.__dict__:
455 self._dirs.delpath(f)
460 self._dirs.delpath(f)
456
461
457 if "_filefoldmap" in self.__dict__:
462 if "_filefoldmap" in self.__dict__:
458 normed = util.normcase(f)
463 normed = util.normcase(f)
459 if normed in self._filefoldmap:
464 if normed in self._filefoldmap:
460 del self._filefoldmap[normed]
465 del self._filefoldmap[normed]
461
466
467 self._updatedfiles.add(f)
468
462 def _addpath(self, f, state, mode, size, mtime):
469 def _addpath(self, f, state, mode, size, mtime):
463 oldstate = self[f]
470 oldstate = self[f]
464 if state == 'a' or oldstate == 'r':
471 if state == 'a' or oldstate == 'r':
465 scmutil.checkfilename(f)
472 scmutil.checkfilename(f)
466 if f in self._dirs:
473 if f in self._dirs:
467 raise error.Abort(_('directory %r already in dirstate') % f)
474 raise error.Abort(_('directory %r already in dirstate') % f)
468 # shadows
475 # shadows
469 for d in util.finddirs(f):
476 for d in util.finddirs(f):
470 if d in self._dirs:
477 if d in self._dirs:
471 break
478 break
472 if d in self._map and self[d] != 'r':
479 if d in self._map and self[d] != 'r':
473 raise error.Abort(
480 raise error.Abort(
474 _('file %r in dirstate clashes with %r') % (d, f))
481 _('file %r in dirstate clashes with %r') % (d, f))
475 if oldstate in "?r" and "_dirs" in self.__dict__:
482 if oldstate in "?r" and "_dirs" in self.__dict__:
476 self._dirs.addpath(f)
483 self._dirs.addpath(f)
477 self._dirty = True
484 self._dirty = True
485 self._updatedfiles.add(f)
478 self._map[f] = dirstatetuple(state, mode, size, mtime)
486 self._map[f] = dirstatetuple(state, mode, size, mtime)
479 if state != 'n' or mtime == -1:
487 if state != 'n' or mtime == -1:
480 self._nonnormalset.add(f)
488 self._nonnormalset.add(f)
481
489
482 def normal(self, f):
490 def normal(self, f):
483 '''Mark a file normal and clean.'''
491 '''Mark a file normal and clean.'''
484 s = os.lstat(self._join(f))
492 s = os.lstat(self._join(f))
485 mtime = s.st_mtime
493 mtime = s.st_mtime
486 self._addpath(f, 'n', s.st_mode,
494 self._addpath(f, 'n', s.st_mode,
487 s.st_size & _rangemask, mtime & _rangemask)
495 s.st_size & _rangemask, mtime & _rangemask)
488 if f in self._copymap:
496 if f in self._copymap:
489 del self._copymap[f]
497 del self._copymap[f]
490 if f in self._nonnormalset:
498 if f in self._nonnormalset:
491 self._nonnormalset.remove(f)
499 self._nonnormalset.remove(f)
492 if mtime > self._lastnormaltime:
500 if mtime > self._lastnormaltime:
493 # Remember the most recent modification timeslot for status(),
501 # Remember the most recent modification timeslot for status(),
494 # to make sure we won't miss future size-preserving file content
502 # to make sure we won't miss future size-preserving file content
495 # modifications that happen within the same timeslot.
503 # modifications that happen within the same timeslot.
496 self._lastnormaltime = mtime
504 self._lastnormaltime = mtime
497
505
498 def normallookup(self, f):
506 def normallookup(self, f):
499 '''Mark a file normal, but possibly dirty.'''
507 '''Mark a file normal, but possibly dirty.'''
500 if self._pl[1] != nullid and f in self._map:
508 if self._pl[1] != nullid and f in self._map:
501 # if there is a merge going on and the file was either
509 # if there is a merge going on and the file was either
502 # in state 'm' (-1) or coming from other parent (-2) before
510 # in state 'm' (-1) or coming from other parent (-2) before
503 # being removed, restore that state.
511 # being removed, restore that state.
504 entry = self._map[f]
512 entry = self._map[f]
505 if entry[0] == 'r' and entry[2] in (-1, -2):
513 if entry[0] == 'r' and entry[2] in (-1, -2):
506 source = self._copymap.get(f)
514 source = self._copymap.get(f)
507 if entry[2] == -1:
515 if entry[2] == -1:
508 self.merge(f)
516 self.merge(f)
509 elif entry[2] == -2:
517 elif entry[2] == -2:
510 self.otherparent(f)
518 self.otherparent(f)
511 if source:
519 if source:
512 self.copy(source, f)
520 self.copy(source, f)
513 return
521 return
514 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
522 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
515 return
523 return
516 self._addpath(f, 'n', 0, -1, -1)
524 self._addpath(f, 'n', 0, -1, -1)
517 if f in self._copymap:
525 if f in self._copymap:
518 del self._copymap[f]
526 del self._copymap[f]
519 if f in self._nonnormalset:
527 if f in self._nonnormalset:
520 self._nonnormalset.remove(f)
528 self._nonnormalset.remove(f)
521
529
522 def otherparent(self, f):
530 def otherparent(self, f):
523 '''Mark as coming from the other parent, always dirty.'''
531 '''Mark as coming from the other parent, always dirty.'''
524 if self._pl[1] == nullid:
532 if self._pl[1] == nullid:
525 raise error.Abort(_("setting %r to other parent "
533 raise error.Abort(_("setting %r to other parent "
526 "only allowed in merges") % f)
534 "only allowed in merges") % f)
527 if f in self and self[f] == 'n':
535 if f in self and self[f] == 'n':
528 # merge-like
536 # merge-like
529 self._addpath(f, 'm', 0, -2, -1)
537 self._addpath(f, 'm', 0, -2, -1)
530 else:
538 else:
531 # add-like
539 # add-like
532 self._addpath(f, 'n', 0, -2, -1)
540 self._addpath(f, 'n', 0, -2, -1)
533
541
534 if f in self._copymap:
542 if f in self._copymap:
535 del self._copymap[f]
543 del self._copymap[f]
536
544
537 def add(self, f):
545 def add(self, f):
538 '''Mark a file added.'''
546 '''Mark a file added.'''
539 self._addpath(f, 'a', 0, -1, -1)
547 self._addpath(f, 'a', 0, -1, -1)
540 if f in self._copymap:
548 if f in self._copymap:
541 del self._copymap[f]
549 del self._copymap[f]
542
550
543 def remove(self, f):
551 def remove(self, f):
544 '''Mark a file removed.'''
552 '''Mark a file removed.'''
545 self._dirty = True
553 self._dirty = True
546 self._droppath(f)
554 self._droppath(f)
547 size = 0
555 size = 0
548 if self._pl[1] != nullid and f in self._map:
556 if self._pl[1] != nullid and f in self._map:
549 # backup the previous state
557 # backup the previous state
550 entry = self._map[f]
558 entry = self._map[f]
551 if entry[0] == 'm': # merge
559 if entry[0] == 'm': # merge
552 size = -1
560 size = -1
553 elif entry[0] == 'n' and entry[2] == -2: # other parent
561 elif entry[0] == 'n' and entry[2] == -2: # other parent
554 size = -2
562 size = -2
555 self._map[f] = dirstatetuple('r', 0, size, 0)
563 self._map[f] = dirstatetuple('r', 0, size, 0)
556 self._nonnormalset.add(f)
564 self._nonnormalset.add(f)
557 if size == 0 and f in self._copymap:
565 if size == 0 and f in self._copymap:
558 del self._copymap[f]
566 del self._copymap[f]
559
567
560 def merge(self, f):
568 def merge(self, f):
561 '''Mark a file merged.'''
569 '''Mark a file merged.'''
562 if self._pl[1] == nullid:
570 if self._pl[1] == nullid:
563 return self.normallookup(f)
571 return self.normallookup(f)
564 return self.otherparent(f)
572 return self.otherparent(f)
565
573
566 def drop(self, f):
574 def drop(self, f):
567 '''Drop a file from the dirstate'''
575 '''Drop a file from the dirstate'''
568 if f in self._map:
576 if f in self._map:
569 self._dirty = True
577 self._dirty = True
570 self._droppath(f)
578 self._droppath(f)
571 del self._map[f]
579 del self._map[f]
572 if f in self._nonnormalset:
580 if f in self._nonnormalset:
573 self._nonnormalset.remove(f)
581 self._nonnormalset.remove(f)
574 if f in self._copymap:
582 if f in self._copymap:
575 del self._copymap[f]
583 del self._copymap[f]
576
584
577 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
585 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
578 if exists is None:
586 if exists is None:
579 exists = os.path.lexists(os.path.join(self._root, path))
587 exists = os.path.lexists(os.path.join(self._root, path))
580 if not exists:
588 if not exists:
581 # Maybe a path component exists
589 # Maybe a path component exists
582 if not ignoremissing and '/' in path:
590 if not ignoremissing and '/' in path:
583 d, f = path.rsplit('/', 1)
591 d, f = path.rsplit('/', 1)
584 d = self._normalize(d, False, ignoremissing, None)
592 d = self._normalize(d, False, ignoremissing, None)
585 folded = d + "/" + f
593 folded = d + "/" + f
586 else:
594 else:
587 # No path components, preserve original case
595 # No path components, preserve original case
588 folded = path
596 folded = path
589 else:
597 else:
590 # recursively normalize leading directory components
598 # recursively normalize leading directory components
591 # against dirstate
599 # against dirstate
592 if '/' in normed:
600 if '/' in normed:
593 d, f = normed.rsplit('/', 1)
601 d, f = normed.rsplit('/', 1)
594 d = self._normalize(d, False, ignoremissing, True)
602 d = self._normalize(d, False, ignoremissing, True)
595 r = self._root + "/" + d
603 r = self._root + "/" + d
596 folded = d + "/" + util.fspath(f, r)
604 folded = d + "/" + util.fspath(f, r)
597 else:
605 else:
598 folded = util.fspath(normed, self._root)
606 folded = util.fspath(normed, self._root)
599 storemap[normed] = folded
607 storemap[normed] = folded
600
608
601 return folded
609 return folded
602
610
603 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
611 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
604 normed = util.normcase(path)
612 normed = util.normcase(path)
605 folded = self._filefoldmap.get(normed, None)
613 folded = self._filefoldmap.get(normed, None)
606 if folded is None:
614 if folded is None:
607 if isknown:
615 if isknown:
608 folded = path
616 folded = path
609 else:
617 else:
610 folded = self._discoverpath(path, normed, ignoremissing, exists,
618 folded = self._discoverpath(path, normed, ignoremissing, exists,
611 self._filefoldmap)
619 self._filefoldmap)
612 return folded
620 return folded
613
621
614 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
622 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
615 normed = util.normcase(path)
623 normed = util.normcase(path)
616 folded = self._filefoldmap.get(normed, None)
624 folded = self._filefoldmap.get(normed, None)
617 if folded is None:
625 if folded is None:
618 folded = self._dirfoldmap.get(normed, None)
626 folded = self._dirfoldmap.get(normed, None)
619 if folded is None:
627 if folded is None:
620 if isknown:
628 if isknown:
621 folded = path
629 folded = path
622 else:
630 else:
623 # store discovered result in dirfoldmap so that future
631 # store discovered result in dirfoldmap so that future
624 # normalizefile calls don't start matching directories
632 # normalizefile calls don't start matching directories
625 folded = self._discoverpath(path, normed, ignoremissing, exists,
633 folded = self._discoverpath(path, normed, ignoremissing, exists,
626 self._dirfoldmap)
634 self._dirfoldmap)
627 return folded
635 return folded
628
636
629 def normalize(self, path, isknown=False, ignoremissing=False):
637 def normalize(self, path, isknown=False, ignoremissing=False):
630 '''
638 '''
631 normalize the case of a pathname when on a casefolding filesystem
639 normalize the case of a pathname when on a casefolding filesystem
632
640
633 isknown specifies whether the filename came from walking the
641 isknown specifies whether the filename came from walking the
634 disk, to avoid extra filesystem access.
642 disk, to avoid extra filesystem access.
635
643
636 If ignoremissing is True, missing path are returned
644 If ignoremissing is True, missing path are returned
637 unchanged. Otherwise, we try harder to normalize possibly
645 unchanged. Otherwise, we try harder to normalize possibly
638 existing path components.
646 existing path components.
639
647
640 The normalized case is determined based on the following precedence:
648 The normalized case is determined based on the following precedence:
641
649
642 - version of name already stored in the dirstate
650 - version of name already stored in the dirstate
643 - version of name stored on disk
651 - version of name stored on disk
644 - version provided via command arguments
652 - version provided via command arguments
645 '''
653 '''
646
654
647 if self._checkcase:
655 if self._checkcase:
648 return self._normalize(path, isknown, ignoremissing)
656 return self._normalize(path, isknown, ignoremissing)
649 return path
657 return path
650
658
651 def clear(self):
659 def clear(self):
652 self._map = {}
660 self._map = {}
653 self._nonnormalset = set()
661 self._nonnormalset = set()
654 if "_dirs" in self.__dict__:
662 if "_dirs" in self.__dict__:
655 delattr(self, "_dirs")
663 delattr(self, "_dirs")
656 self._copymap = {}
664 self._copymap = {}
657 self._pl = [nullid, nullid]
665 self._pl = [nullid, nullid]
658 self._lastnormaltime = 0
666 self._lastnormaltime = 0
667 self._updatedfiles.clear()
659 self._dirty = True
668 self._dirty = True
660
669
661 def rebuild(self, parent, allfiles, changedfiles=None):
670 def rebuild(self, parent, allfiles, changedfiles=None):
662 if changedfiles is None:
671 if changedfiles is None:
663 # Rebuild entire dirstate
672 # Rebuild entire dirstate
664 changedfiles = allfiles
673 changedfiles = allfiles
665 lastnormaltime = self._lastnormaltime
674 lastnormaltime = self._lastnormaltime
666 self.clear()
675 self.clear()
667 self._lastnormaltime = lastnormaltime
676 self._lastnormaltime = lastnormaltime
668
677
669 if self._origpl is None:
678 if self._origpl is None:
670 self._origpl = self._pl
679 self._origpl = self._pl
671 self._pl = (parent, nullid)
680 self._pl = (parent, nullid)
672 for f in changedfiles:
681 for f in changedfiles:
673 if f in allfiles:
682 if f in allfiles:
674 self.normallookup(f)
683 self.normallookup(f)
675 else:
684 else:
676 self.drop(f)
685 self.drop(f)
677
686
678 self._dirty = True
687 self._dirty = True
679
688
680 def write(self, tr):
689 def write(self, tr):
681 if not self._dirty:
690 if not self._dirty:
682 return
691 return
683
692
684 filename = self._filename
693 filename = self._filename
685 if tr:
694 if tr:
686 # 'dirstate.write()' is not only for writing in-memory
695 # 'dirstate.write()' is not only for writing in-memory
687 # changes out, but also for dropping ambiguous timestamp.
696 # changes out, but also for dropping ambiguous timestamp.
688 # delayed writing re-raise "ambiguous timestamp issue".
697 # delayed writing re-raise "ambiguous timestamp issue".
689 # See also the wiki page below for detail:
698 # See also the wiki page below for detail:
690 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
699 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
691
700
692 # emulate dropping timestamp in 'parsers.pack_dirstate'
701 # emulate dropping timestamp in 'parsers.pack_dirstate'
693 now = _getfsnow(self._opener)
702 now = _getfsnow(self._opener)
694 dmap = self._map
703 dmap = self._map
695 for f, e in dmap.iteritems():
704 for f in self._updatedfiles:
696 if e[0] == 'n' and e[3] == now:
705 e = dmap.get(f)
706 if e is not None and e[0] == 'n' and e[3] == now:
697 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
707 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
698 self._nonnormalset.add(f)
708 self._nonnormalset.add(f)
699
709
700 # emulate that all 'dirstate.normal' results are written out
710 # emulate that all 'dirstate.normal' results are written out
701 self._lastnormaltime = 0
711 self._lastnormaltime = 0
712 self._updatedfiles.clear()
702
713
703 # delay writing in-memory changes out
714 # delay writing in-memory changes out
704 tr.addfilegenerator('dirstate', (self._filename,),
715 tr.addfilegenerator('dirstate', (self._filename,),
705 self._writedirstate, location='plain')
716 self._writedirstate, location='plain')
706 return
717 return
707
718
708 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
719 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
709 self._writedirstate(st)
720 self._writedirstate(st)
710
721
711 def addparentchangecallback(self, category, callback):
722 def addparentchangecallback(self, category, callback):
712 """add a callback to be called when the wd parents are changed
723 """add a callback to be called when the wd parents are changed
713
724
714 Callback will be called with the following arguments:
725 Callback will be called with the following arguments:
715 dirstate, (oldp1, oldp2), (newp1, newp2)
726 dirstate, (oldp1, oldp2), (newp1, newp2)
716
727
717 Category is a unique identifier to allow overwriting an old callback
728 Category is a unique identifier to allow overwriting an old callback
718 with a newer callback.
729 with a newer callback.
719 """
730 """
720 self._plchangecallbacks[category] = callback
731 self._plchangecallbacks[category] = callback
721
732
722 def _writedirstate(self, st):
733 def _writedirstate(self, st):
723 # notify callbacks about parents change
734 # notify callbacks about parents change
724 if self._origpl is not None and self._origpl != self._pl:
735 if self._origpl is not None and self._origpl != self._pl:
725 for c, callback in sorted(self._plchangecallbacks.iteritems()):
736 for c, callback in sorted(self._plchangecallbacks.iteritems()):
726 callback(self, self._origpl, self._pl)
737 callback(self, self._origpl, self._pl)
727 self._origpl = None
738 self._origpl = None
728 # use the modification time of the newly created temporary file as the
739 # use the modification time of the newly created temporary file as the
729 # filesystem's notion of 'now'
740 # filesystem's notion of 'now'
730 now = util.fstat(st).st_mtime & _rangemask
741 now = util.fstat(st).st_mtime & _rangemask
731
742
732 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
743 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
733 # timestamp of each entries in dirstate, because of 'now > mtime'
744 # timestamp of each entries in dirstate, because of 'now > mtime'
734 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
745 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
735 if delaywrite > 0:
746 if delaywrite > 0:
736 # do we have any files to delay for?
747 # do we have any files to delay for?
737 for f, e in self._map.iteritems():
748 for f, e in self._map.iteritems():
738 if e[0] == 'n' and e[3] == now:
749 if e[0] == 'n' and e[3] == now:
739 import time # to avoid useless import
750 import time # to avoid useless import
740 # rather than sleep n seconds, sleep until the next
751 # rather than sleep n seconds, sleep until the next
741 # multiple of n seconds
752 # multiple of n seconds
742 clock = time.time()
753 clock = time.time()
743 start = int(clock) - (int(clock) % delaywrite)
754 start = int(clock) - (int(clock) % delaywrite)
744 end = start + delaywrite
755 end = start + delaywrite
745 time.sleep(end - clock)
756 time.sleep(end - clock)
746 now = end # trust our estimate that the end is near now
757 now = end # trust our estimate that the end is near now
747 break
758 break
748
759
749 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
760 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
750 self._nonnormalset = nonnormalentries(self._map)
761 self._nonnormalset = nonnormalentries(self._map)
751 st.close()
762 st.close()
752 self._lastnormaltime = 0
763 self._lastnormaltime = 0
753 self._dirty = self._dirtypl = False
764 self._dirty = self._dirtypl = False
754
765
755 def _dirignore(self, f):
766 def _dirignore(self, f):
756 if f == '.':
767 if f == '.':
757 return False
768 return False
758 if self._ignore(f):
769 if self._ignore(f):
759 return True
770 return True
760 for p in util.finddirs(f):
771 for p in util.finddirs(f):
761 if self._ignore(p):
772 if self._ignore(p):
762 return True
773 return True
763 return False
774 return False
764
775
765 def _ignorefiles(self):
776 def _ignorefiles(self):
766 files = []
777 files = []
767 if os.path.exists(self._join('.hgignore')):
778 if os.path.exists(self._join('.hgignore')):
768 files.append(self._join('.hgignore'))
779 files.append(self._join('.hgignore'))
769 for name, path in self._ui.configitems("ui"):
780 for name, path in self._ui.configitems("ui"):
770 if name == 'ignore' or name.startswith('ignore.'):
781 if name == 'ignore' or name.startswith('ignore.'):
771 # we need to use os.path.join here rather than self._join
782 # we need to use os.path.join here rather than self._join
772 # because path is arbitrary and user-specified
783 # because path is arbitrary and user-specified
773 files.append(os.path.join(self._rootdir, util.expandpath(path)))
784 files.append(os.path.join(self._rootdir, util.expandpath(path)))
774 return files
785 return files
775
786
776 def _ignorefileandline(self, f):
787 def _ignorefileandline(self, f):
777 files = collections.deque(self._ignorefiles())
788 files = collections.deque(self._ignorefiles())
778 visited = set()
789 visited = set()
779 while files:
790 while files:
780 i = files.popleft()
791 i = files.popleft()
781 patterns = matchmod.readpatternfile(i, self._ui.warn,
792 patterns = matchmod.readpatternfile(i, self._ui.warn,
782 sourceinfo=True)
793 sourceinfo=True)
783 for pattern, lineno, line in patterns:
794 for pattern, lineno, line in patterns:
784 kind, p = matchmod._patsplit(pattern, 'glob')
795 kind, p = matchmod._patsplit(pattern, 'glob')
785 if kind == "subinclude":
796 if kind == "subinclude":
786 if p not in visited:
797 if p not in visited:
787 files.append(p)
798 files.append(p)
788 continue
799 continue
789 m = matchmod.match(self._root, '', [], [pattern],
800 m = matchmod.match(self._root, '', [], [pattern],
790 warn=self._ui.warn)
801 warn=self._ui.warn)
791 if m(f):
802 if m(f):
792 return (i, lineno, line)
803 return (i, lineno, line)
793 visited.add(i)
804 visited.add(i)
794 return (None, -1, "")
805 return (None, -1, "")
795
806
796 def _walkexplicit(self, match, subrepos):
807 def _walkexplicit(self, match, subrepos):
797 '''Get stat data about the files explicitly specified by match.
808 '''Get stat data about the files explicitly specified by match.
798
809
799 Return a triple (results, dirsfound, dirsnotfound).
810 Return a triple (results, dirsfound, dirsnotfound).
800 - results is a mapping from filename to stat result. It also contains
811 - results is a mapping from filename to stat result. It also contains
801 listings mapping subrepos and .hg to None.
812 listings mapping subrepos and .hg to None.
802 - dirsfound is a list of files found to be directories.
813 - dirsfound is a list of files found to be directories.
803 - dirsnotfound is a list of files that the dirstate thinks are
814 - dirsnotfound is a list of files that the dirstate thinks are
804 directories and that were not found.'''
815 directories and that were not found.'''
805
816
806 def badtype(mode):
817 def badtype(mode):
807 kind = _('unknown')
818 kind = _('unknown')
808 if stat.S_ISCHR(mode):
819 if stat.S_ISCHR(mode):
809 kind = _('character device')
820 kind = _('character device')
810 elif stat.S_ISBLK(mode):
821 elif stat.S_ISBLK(mode):
811 kind = _('block device')
822 kind = _('block device')
812 elif stat.S_ISFIFO(mode):
823 elif stat.S_ISFIFO(mode):
813 kind = _('fifo')
824 kind = _('fifo')
814 elif stat.S_ISSOCK(mode):
825 elif stat.S_ISSOCK(mode):
815 kind = _('socket')
826 kind = _('socket')
816 elif stat.S_ISDIR(mode):
827 elif stat.S_ISDIR(mode):
817 kind = _('directory')
828 kind = _('directory')
818 return _('unsupported file type (type is %s)') % kind
829 return _('unsupported file type (type is %s)') % kind
819
830
820 matchedir = match.explicitdir
831 matchedir = match.explicitdir
821 badfn = match.bad
832 badfn = match.bad
822 dmap = self._map
833 dmap = self._map
823 lstat = os.lstat
834 lstat = os.lstat
824 getkind = stat.S_IFMT
835 getkind = stat.S_IFMT
825 dirkind = stat.S_IFDIR
836 dirkind = stat.S_IFDIR
826 regkind = stat.S_IFREG
837 regkind = stat.S_IFREG
827 lnkkind = stat.S_IFLNK
838 lnkkind = stat.S_IFLNK
828 join = self._join
839 join = self._join
829 dirsfound = []
840 dirsfound = []
830 foundadd = dirsfound.append
841 foundadd = dirsfound.append
831 dirsnotfound = []
842 dirsnotfound = []
832 notfoundadd = dirsnotfound.append
843 notfoundadd = dirsnotfound.append
833
844
834 if not match.isexact() and self._checkcase:
845 if not match.isexact() and self._checkcase:
835 normalize = self._normalize
846 normalize = self._normalize
836 else:
847 else:
837 normalize = None
848 normalize = None
838
849
839 files = sorted(match.files())
850 files = sorted(match.files())
840 subrepos.sort()
851 subrepos.sort()
841 i, j = 0, 0
852 i, j = 0, 0
842 while i < len(files) and j < len(subrepos):
853 while i < len(files) and j < len(subrepos):
843 subpath = subrepos[j] + "/"
854 subpath = subrepos[j] + "/"
844 if files[i] < subpath:
855 if files[i] < subpath:
845 i += 1
856 i += 1
846 continue
857 continue
847 while i < len(files) and files[i].startswith(subpath):
858 while i < len(files) and files[i].startswith(subpath):
848 del files[i]
859 del files[i]
849 j += 1
860 j += 1
850
861
851 if not files or '.' in files:
862 if not files or '.' in files:
852 files = ['.']
863 files = ['.']
853 results = dict.fromkeys(subrepos)
864 results = dict.fromkeys(subrepos)
854 results['.hg'] = None
865 results['.hg'] = None
855
866
856 alldirs = None
867 alldirs = None
857 for ff in files:
868 for ff in files:
858 # constructing the foldmap is expensive, so don't do it for the
869 # constructing the foldmap is expensive, so don't do it for the
859 # common case where files is ['.']
870 # common case where files is ['.']
860 if normalize and ff != '.':
871 if normalize and ff != '.':
861 nf = normalize(ff, False, True)
872 nf = normalize(ff, False, True)
862 else:
873 else:
863 nf = ff
874 nf = ff
864 if nf in results:
875 if nf in results:
865 continue
876 continue
866
877
867 try:
878 try:
868 st = lstat(join(nf))
879 st = lstat(join(nf))
869 kind = getkind(st.st_mode)
880 kind = getkind(st.st_mode)
870 if kind == dirkind:
881 if kind == dirkind:
871 if nf in dmap:
882 if nf in dmap:
872 # file replaced by dir on disk but still in dirstate
883 # file replaced by dir on disk but still in dirstate
873 results[nf] = None
884 results[nf] = None
874 if matchedir:
885 if matchedir:
875 matchedir(nf)
886 matchedir(nf)
876 foundadd((nf, ff))
887 foundadd((nf, ff))
877 elif kind == regkind or kind == lnkkind:
888 elif kind == regkind or kind == lnkkind:
878 results[nf] = st
889 results[nf] = st
879 else:
890 else:
880 badfn(ff, badtype(kind))
891 badfn(ff, badtype(kind))
881 if nf in dmap:
892 if nf in dmap:
882 results[nf] = None
893 results[nf] = None
883 except OSError as inst: # nf not found on disk - it is dirstate only
894 except OSError as inst: # nf not found on disk - it is dirstate only
884 if nf in dmap: # does it exactly match a missing file?
895 if nf in dmap: # does it exactly match a missing file?
885 results[nf] = None
896 results[nf] = None
886 else: # does it match a missing directory?
897 else: # does it match a missing directory?
887 if alldirs is None:
898 if alldirs is None:
888 alldirs = util.dirs(dmap)
899 alldirs = util.dirs(dmap)
889 if nf in alldirs:
900 if nf in alldirs:
890 if matchedir:
901 if matchedir:
891 matchedir(nf)
902 matchedir(nf)
892 notfoundadd(nf)
903 notfoundadd(nf)
893 else:
904 else:
894 badfn(ff, inst.strerror)
905 badfn(ff, inst.strerror)
895
906
896 # Case insensitive filesystems cannot rely on lstat() failing to detect
907 # Case insensitive filesystems cannot rely on lstat() failing to detect
897 # a case-only rename. Prune the stat object for any file that does not
908 # a case-only rename. Prune the stat object for any file that does not
898 # match the case in the filesystem, if there are multiple files that
909 # match the case in the filesystem, if there are multiple files that
899 # normalize to the same path.
910 # normalize to the same path.
900 if match.isexact() and self._checkcase:
911 if match.isexact() and self._checkcase:
901 normed = {}
912 normed = {}
902
913
903 for f, st in results.iteritems():
914 for f, st in results.iteritems():
904 if st is None:
915 if st is None:
905 continue
916 continue
906
917
907 nc = util.normcase(f)
918 nc = util.normcase(f)
908 paths = normed.get(nc)
919 paths = normed.get(nc)
909
920
910 if paths is None:
921 if paths is None:
911 paths = set()
922 paths = set()
912 normed[nc] = paths
923 normed[nc] = paths
913
924
914 paths.add(f)
925 paths.add(f)
915
926
916 for norm, paths in normed.iteritems():
927 for norm, paths in normed.iteritems():
917 if len(paths) > 1:
928 if len(paths) > 1:
918 for path in paths:
929 for path in paths:
919 folded = self._discoverpath(path, norm, True, None,
930 folded = self._discoverpath(path, norm, True, None,
920 self._dirfoldmap)
931 self._dirfoldmap)
921 if path != folded:
932 if path != folded:
922 results[path] = None
933 results[path] = None
923
934
924 return results, dirsfound, dirsnotfound
935 return results, dirsfound, dirsnotfound
925
936
926 def walk(self, match, subrepos, unknown, ignored, full=True):
937 def walk(self, match, subrepos, unknown, ignored, full=True):
927 '''
938 '''
928 Walk recursively through the directory tree, finding all files
939 Walk recursively through the directory tree, finding all files
929 matched by match.
940 matched by match.
930
941
931 If full is False, maybe skip some known-clean files.
942 If full is False, maybe skip some known-clean files.
932
943
933 Return a dict mapping filename to stat-like object (either
944 Return a dict mapping filename to stat-like object (either
934 mercurial.osutil.stat instance or return value of os.stat()).
945 mercurial.osutil.stat instance or return value of os.stat()).
935
946
936 '''
947 '''
937 # full is a flag that extensions that hook into walk can use -- this
948 # full is a flag that extensions that hook into walk can use -- this
938 # implementation doesn't use it at all. This satisfies the contract
949 # implementation doesn't use it at all. This satisfies the contract
939 # because we only guarantee a "maybe".
950 # because we only guarantee a "maybe".
940
951
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = osutil.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
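            # traverse uses an explicit work list rather than recursion, so
            # deeply nested directory trees cannot exhaust the interpreter's
            # recursion limit; each entry popped from the list is a directory
            # that still needs to be listed.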
            wadd = work.append
            while work:
                nd = work.pop()
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd), inst.strerror)
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that traverse doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn, b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = dmap.keys()
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was
                # stat'ed and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory,
                        # which we report as missing in this case
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                nf = iter(visit).next
                for st in util.statfiles([join(i) for i in visit]):
                    results[nf()] = st
        return results

    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
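        # A minimal sketch of a caller, with a hypothetical `repo` object
        # (inside Mercurial this is normally driven through the working
        # context's status machinery):
        #
        #   m = matchmod.always(repo.root, '')
        #   unsure, st = repo.dirstate.status(m, [], ignored=False,
        #                                     clean=False, unknown=True)
        #   # files in `unsure` still need a content comparison against the
        #   # parent revision before they can be called modified or clean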
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        ladd = lookup.append # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]
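            # A dirstate entry holds (state, mode, size, mtime): state is 'n'
            # (normal), 'a' (added), 'r' (removed) or 'm' (merged); size -2
            # marks content coming from the other merge parent. The
            # '& _rangemask' comparisons below are needed because size and
            # mtime are stored truncated to 31 bits in the dirstate file.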

            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))

    def matches(self, match):
        '''
        return files in the dirstate (in whatever state) filtered by match
        '''
        dmap = self._map
        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files is
            # much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just return
            # that
            return list(files)
        return [f for f in dmap if match(f)]

    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, suffix='', prefix=''):
        '''Save current dirstate into backup file with suffix'''
        assert len(suffix) > 0 or len(prefix) > 0
        filename = self._actualfilename(tr)

        # use '_writedirstate' instead of 'write' so that the changes are
        # definitely written out, because the latter skips writing while a
        # transaction is running. The file written here is then copied to
        # create the backup of the dirstate at this point.
        self._writedirstate(self._opener(filename, "w", atomictemp=True,
                                          checkambig=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        self._opener.write(prefix + self._filename + suffix,
                           self._opener.tryread(filename))

    def restorebackup(self, tr, suffix='', prefix=''):
        '''Restore dirstate from backup file with suffix'''
        assert len(suffix) > 0 or len(prefix) > 0
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        # using self._filename to avoid having "pending" in the backup filename
        self._opener.rename(prefix + self._filename + suffix, filename,
                            checkambig=True)

    def clearbackup(self, tr, suffix='', prefix=''):
        '''Clear backup file with suffix'''
        assert len(suffix) > 0 or len(prefix) > 0
        # using self._filename to avoid having "pending" in the backup filename
        self._opener.unlink(prefix + self._filename + suffix)
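    # A hedged sketch of how the backup helpers above are meant to be paired,
    # with hypothetical caller code (in Mercurial this pattern is driven by
    # wrappers such as dirstateguard):
    #
    #   repo.dirstate.savebackup(tr, suffix='.example-backup')
    #   try:
    #       ...  # operation that may rewrite the dirstate
    #       repo.dirstate.clearbackup(tr, suffix='.example-backup')
    #   except Exception:
    #       repo.dirstate.restorebackup(tr, suffix='.example-backup')
    #       raise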