dirstate: make writing dirstate file out avoid ambiguity of file stat...
FUJIWARA Katsunori
r29301:28f37ffc default
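The only functional change in this revision is in 'dirstate.write()': the dirstate file is now opened with 'checkambig=True' ('st = self._opener(filename, "w", atomictemp=True, checkambig=True)'), so that atomically replacing '.hg/dirstate' cannot leave the new file with the same size and mtime as the old one. Such "ambiguous" stat data would let size-preserving rewrites within the same second go undetected by code that validates caches by comparing stat results. The sketch below is a minimal, self-contained illustration of that idea under the assumption that ambiguity is resolved by advancing the mtime; the helper name is hypothetical and this is not Mercurial's actual 'checkambig' implementation.

    # Hedged sketch: detect and break stat ambiguity after replacing a file.
    # 'avoid_stat_ambiguity' is a hypothetical name, not a Mercurial API.
    import os

    def avoid_stat_ambiguity(path, oldstat):
        """Advance mtime of 'path' if its stat is indistinguishable from 'oldstat'."""
        newstat = os.stat(path)
        ambiguous = (newstat.st_size == oldstat.st_size and
                     int(newstat.st_mtime) == int(oldstat.st_mtime))
        if ambiguous:
            # nudge mtime forward (masked to 31 bits, like dirstate timestamps)
            advanced = (int(oldstat.st_mtime) + 1) & 0x7fffffff
            os.utime(path, (advanced, advanced))
        return ambiguous

In the dirstate this matters because entries are compared by (mode, size, mtime); the surrounding code already works around the same race at another level via '_lastnormaltime' and the 'debug.dirstate.delaywrite' knob.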
@@ -1,1251 +1,1252 @@
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import errno
11 import errno
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import nullid
16 from .node import nullid
17 from . import (
17 from . import (
18 encoding,
18 encoding,
19 error,
19 error,
20 match as matchmod,
20 match as matchmod,
21 osutil,
21 osutil,
22 parsers,
22 parsers,
23 pathutil,
23 pathutil,
24 scmutil,
24 scmutil,
25 util,
25 util,
26 )
26 )
27
27
28 propertycache = util.propertycache
28 propertycache = util.propertycache
29 filecache = scmutil.filecache
29 filecache = scmutil.filecache
30 _rangemask = 0x7fffffff
30 _rangemask = 0x7fffffff
31
31
32 dirstatetuple = parsers.dirstatetuple
32 dirstatetuple = parsers.dirstatetuple
33
33
34 class repocache(filecache):
34 class repocache(filecache):
35 """filecache for files in .hg/"""
35 """filecache for files in .hg/"""
36 def join(self, obj, fname):
36 def join(self, obj, fname):
37 return obj._opener.join(fname)
37 return obj._opener.join(fname)
38
38
39 class rootcache(filecache):
39 class rootcache(filecache):
40 """filecache for files in the repository root"""
40 """filecache for files in the repository root"""
41 def join(self, obj, fname):
41 def join(self, obj, fname):
42 return obj._join(fname)
42 return obj._join(fname)
43
43
44 def _getfsnow(vfs):
44 def _getfsnow(vfs):
45 '''Get "now" timestamp on filesystem'''
45 '''Get "now" timestamp on filesystem'''
46 tmpfd, tmpname = vfs.mkstemp()
46 tmpfd, tmpname = vfs.mkstemp()
47 try:
47 try:
48 return os.fstat(tmpfd).st_mtime
48 return os.fstat(tmpfd).st_mtime
49 finally:
49 finally:
50 os.close(tmpfd)
50 os.close(tmpfd)
51 vfs.unlink(tmpname)
51 vfs.unlink(tmpname)
52
52
53 def nonnormalentries(dmap):
53 def nonnormalentries(dmap):
54 '''Compute the nonnormal dirstate entries from the dmap'''
54 '''Compute the nonnormal dirstate entries from the dmap'''
55 try:
55 try:
56 return parsers.nonnormalentries(dmap)
56 return parsers.nonnormalentries(dmap)
57 except AttributeError:
57 except AttributeError:
58 return set(fname for fname, e in dmap.iteritems()
58 return set(fname for fname, e in dmap.iteritems()
59 if e[0] != 'n' or e[3] == -1)
59 if e[0] != 'n' or e[3] == -1)
60
60
61 def _trypending(root, vfs, filename):
61 def _trypending(root, vfs, filename):
62 '''Open file to be read according to HG_PENDING environment variable
62 '''Open file to be read according to HG_PENDING environment variable
63
63
64 This opens '.pending' of specified 'filename' only when HG_PENDING
64 This opens '.pending' of specified 'filename' only when HG_PENDING
65 is equal to 'root'.
65 is equal to 'root'.
66
66
67 This returns '(fp, is_pending_opened)' tuple.
67 This returns '(fp, is_pending_opened)' tuple.
68 '''
68 '''
69 if root == os.environ.get('HG_PENDING'):
69 if root == os.environ.get('HG_PENDING'):
70 try:
70 try:
71 return (vfs('%s.pending' % filename), True)
71 return (vfs('%s.pending' % filename), True)
72 except IOError as inst:
72 except IOError as inst:
73 if inst.errno != errno.ENOENT:
73 if inst.errno != errno.ENOENT:
74 raise
74 raise
75 return (vfs(filename), False)
75 return (vfs(filename), False)
76
76
77 _token = object()
77 _token = object()
78
78
79 class dirstate(object):
79 class dirstate(object):
80
80
81 def __init__(self, opener, ui, root, validate):
81 def __init__(self, opener, ui, root, validate):
82 '''Create a new dirstate object.
82 '''Create a new dirstate object.
83
83
84 opener is an open()-like callable that can be used to open the
84 opener is an open()-like callable that can be used to open the
85 dirstate file; root is the root of the directory tracked by
85 dirstate file; root is the root of the directory tracked by
86 the dirstate.
86 the dirstate.
87 '''
87 '''
88 self._opener = opener
88 self._opener = opener
89 self._validate = validate
89 self._validate = validate
90 self._root = root
90 self._root = root
91 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
91 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
92 # UNC path pointing to root share (issue4557)
92 # UNC path pointing to root share (issue4557)
93 self._rootdir = pathutil.normasprefix(root)
93 self._rootdir = pathutil.normasprefix(root)
94 # internal config: ui.forcecwd
94 # internal config: ui.forcecwd
95 forcecwd = ui.config('ui', 'forcecwd')
95 forcecwd = ui.config('ui', 'forcecwd')
96 if forcecwd:
96 if forcecwd:
97 self._cwd = forcecwd
97 self._cwd = forcecwd
98 self._dirty = False
98 self._dirty = False
99 self._dirtypl = False
99 self._dirtypl = False
100 self._lastnormaltime = 0
100 self._lastnormaltime = 0
101 self._ui = ui
101 self._ui = ui
102 self._filecache = {}
102 self._filecache = {}
103 self._parentwriters = 0
103 self._parentwriters = 0
104 self._filename = 'dirstate'
104 self._filename = 'dirstate'
105 self._pendingfilename = '%s.pending' % self._filename
105 self._pendingfilename = '%s.pending' % self._filename
106
106
107 # for consistent view between _pl() and _read() invocations
107 # for consistent view between _pl() and _read() invocations
108 self._pendingmode = None
108 self._pendingmode = None
109
109
110 def beginparentchange(self):
110 def beginparentchange(self):
111 '''Marks the beginning of a set of changes that involve changing
111 '''Marks the beginning of a set of changes that involve changing
112 the dirstate parents. If there is an exception during this time,
112 the dirstate parents. If there is an exception during this time,
113 the dirstate will not be written when the wlock is released. This
113 the dirstate will not be written when the wlock is released. This
114 prevents writing an incoherent dirstate where the parent doesn't
114 prevents writing an incoherent dirstate where the parent doesn't
115 match the contents.
115 match the contents.
116 '''
116 '''
117 self._parentwriters += 1
117 self._parentwriters += 1
118
118
119 def endparentchange(self):
119 def endparentchange(self):
120 '''Marks the end of a set of changes that involve changing the
120 '''Marks the end of a set of changes that involve changing the
121 dirstate parents. Once all parent changes have been marked done,
121 dirstate parents. Once all parent changes have been marked done,
122 the wlock will be free to write the dirstate on release.
122 the wlock will be free to write the dirstate on release.
123 '''
123 '''
124 if self._parentwriters > 0:
124 if self._parentwriters > 0:
125 self._parentwriters -= 1
125 self._parentwriters -= 1
126
126
127 def pendingparentchange(self):
127 def pendingparentchange(self):
128 '''Returns true if the dirstate is in the middle of a set of changes
128 '''Returns true if the dirstate is in the middle of a set of changes
129 that modify the dirstate parent.
129 that modify the dirstate parent.
130 '''
130 '''
131 return self._parentwriters > 0
131 return self._parentwriters > 0
132
132
133 @propertycache
133 @propertycache
134 def _map(self):
134 def _map(self):
135 '''Return the dirstate contents as a map from filename to
135 '''Return the dirstate contents as a map from filename to
136 (state, mode, size, time).'''
136 (state, mode, size, time).'''
137 self._read()
137 self._read()
138 return self._map
138 return self._map
139
139
140 @propertycache
140 @propertycache
141 def _copymap(self):
141 def _copymap(self):
142 self._read()
142 self._read()
143 return self._copymap
143 return self._copymap
144
144
145 @propertycache
145 @propertycache
146 def _nonnormalset(self):
146 def _nonnormalset(self):
147 return nonnormalentries(self._map)
147 return nonnormalentries(self._map)
148
148
149 @propertycache
149 @propertycache
150 def _filefoldmap(self):
150 def _filefoldmap(self):
151 try:
151 try:
152 makefilefoldmap = parsers.make_file_foldmap
152 makefilefoldmap = parsers.make_file_foldmap
153 except AttributeError:
153 except AttributeError:
154 pass
154 pass
155 else:
155 else:
156 return makefilefoldmap(self._map, util.normcasespec,
156 return makefilefoldmap(self._map, util.normcasespec,
157 util.normcasefallback)
157 util.normcasefallback)
158
158
159 f = {}
159 f = {}
160 normcase = util.normcase
160 normcase = util.normcase
161 for name, s in self._map.iteritems():
161 for name, s in self._map.iteritems():
162 if s[0] != 'r':
162 if s[0] != 'r':
163 f[normcase(name)] = name
163 f[normcase(name)] = name
164 f['.'] = '.' # prevents useless util.fspath() invocation
164 f['.'] = '.' # prevents useless util.fspath() invocation
165 return f
165 return f
166
166
167 @propertycache
167 @propertycache
168 def _dirfoldmap(self):
168 def _dirfoldmap(self):
169 f = {}
169 f = {}
170 normcase = util.normcase
170 normcase = util.normcase
171 for name in self._dirs:
171 for name in self._dirs:
172 f[normcase(name)] = name
172 f[normcase(name)] = name
173 return f
173 return f
174
174
175 @repocache('branch')
175 @repocache('branch')
176 def _branch(self):
176 def _branch(self):
177 try:
177 try:
178 return self._opener.read("branch").strip() or "default"
178 return self._opener.read("branch").strip() or "default"
179 except IOError as inst:
179 except IOError as inst:
180 if inst.errno != errno.ENOENT:
180 if inst.errno != errno.ENOENT:
181 raise
181 raise
182 return "default"
182 return "default"
183
183
184 @propertycache
184 @propertycache
185 def _pl(self):
185 def _pl(self):
186 try:
186 try:
187 fp = self._opendirstatefile()
187 fp = self._opendirstatefile()
188 st = fp.read(40)
188 st = fp.read(40)
189 fp.close()
189 fp.close()
190 l = len(st)
190 l = len(st)
191 if l == 40:
191 if l == 40:
192 return st[:20], st[20:40]
192 return st[:20], st[20:40]
193 elif l > 0 and l < 40:
193 elif l > 0 and l < 40:
194 raise error.Abort(_('working directory state appears damaged!'))
194 raise error.Abort(_('working directory state appears damaged!'))
195 except IOError as err:
195 except IOError as err:
196 if err.errno != errno.ENOENT:
196 if err.errno != errno.ENOENT:
197 raise
197 raise
198 return [nullid, nullid]
198 return [nullid, nullid]
199
199
200 @propertycache
200 @propertycache
201 def _dirs(self):
201 def _dirs(self):
202 return util.dirs(self._map, 'r')
202 return util.dirs(self._map, 'r')
203
203
204 def dirs(self):
204 def dirs(self):
205 return self._dirs
205 return self._dirs
206
206
207 @rootcache('.hgignore')
207 @rootcache('.hgignore')
208 def _ignore(self):
208 def _ignore(self):
209 files = self._ignorefiles()
209 files = self._ignorefiles()
210 if not files:
210 if not files:
211 return util.never
211 return util.never
212
212
213 pats = ['include:%s' % f for f in files]
213 pats = ['include:%s' % f for f in files]
214 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
214 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
215
215
216 @propertycache
216 @propertycache
217 def _slash(self):
217 def _slash(self):
218 return self._ui.configbool('ui', 'slash') and os.sep != '/'
218 return self._ui.configbool('ui', 'slash') and os.sep != '/'
219
219
220 @propertycache
220 @propertycache
221 def _checklink(self):
221 def _checklink(self):
222 return util.checklink(self._root)
222 return util.checklink(self._root)
223
223
224 @propertycache
224 @propertycache
225 def _checkexec(self):
225 def _checkexec(self):
226 return util.checkexec(self._root)
226 return util.checkexec(self._root)
227
227
228 @propertycache
228 @propertycache
229 def _checkcase(self):
229 def _checkcase(self):
230 return not util.checkcase(self._join('.hg'))
230 return not util.checkcase(self._join('.hg'))
231
231
232 def _join(self, f):
232 def _join(self, f):
233 # much faster than os.path.join()
233 # much faster than os.path.join()
234 # it's safe because f is always a relative path
234 # it's safe because f is always a relative path
235 return self._rootdir + f
235 return self._rootdir + f
236
236
237 def flagfunc(self, buildfallback):
237 def flagfunc(self, buildfallback):
238 if self._checklink and self._checkexec:
238 if self._checklink and self._checkexec:
239 def f(x):
239 def f(x):
240 try:
240 try:
241 st = os.lstat(self._join(x))
241 st = os.lstat(self._join(x))
242 if util.statislink(st):
242 if util.statislink(st):
243 return 'l'
243 return 'l'
244 if util.statisexec(st):
244 if util.statisexec(st):
245 return 'x'
245 return 'x'
246 except OSError:
246 except OSError:
247 pass
247 pass
248 return ''
248 return ''
249 return f
249 return f
250
250
251 fallback = buildfallback()
251 fallback = buildfallback()
252 if self._checklink:
252 if self._checklink:
253 def f(x):
253 def f(x):
254 if os.path.islink(self._join(x)):
254 if os.path.islink(self._join(x)):
255 return 'l'
255 return 'l'
256 if 'x' in fallback(x):
256 if 'x' in fallback(x):
257 return 'x'
257 return 'x'
258 return ''
258 return ''
259 return f
259 return f
260 if self._checkexec:
260 if self._checkexec:
261 def f(x):
261 def f(x):
262 if 'l' in fallback(x):
262 if 'l' in fallback(x):
263 return 'l'
263 return 'l'
264 if util.isexec(self._join(x)):
264 if util.isexec(self._join(x)):
265 return 'x'
265 return 'x'
266 return ''
266 return ''
267 return f
267 return f
268 else:
268 else:
269 return fallback
269 return fallback
270
270
271 @propertycache
271 @propertycache
272 def _cwd(self):
272 def _cwd(self):
273 return os.getcwd()
273 return os.getcwd()
274
274
275 def getcwd(self):
275 def getcwd(self):
276 '''Return the path from which a canonical path is calculated.
276 '''Return the path from which a canonical path is calculated.
277
277
278 This path should be used to resolve file patterns or to convert
278 This path should be used to resolve file patterns or to convert
279 canonical paths back to file paths for display. It shouldn't be
279 canonical paths back to file paths for display. It shouldn't be
280 used to get real file paths. Use vfs functions instead.
280 used to get real file paths. Use vfs functions instead.
281 '''
281 '''
282 cwd = self._cwd
282 cwd = self._cwd
283 if cwd == self._root:
283 if cwd == self._root:
284 return ''
284 return ''
285 # self._root ends with a path separator if self._root is '/' or 'C:\'
285 # self._root ends with a path separator if self._root is '/' or 'C:\'
286 rootsep = self._root
286 rootsep = self._root
287 if not util.endswithsep(rootsep):
287 if not util.endswithsep(rootsep):
288 rootsep += os.sep
288 rootsep += os.sep
289 if cwd.startswith(rootsep):
289 if cwd.startswith(rootsep):
290 return cwd[len(rootsep):]
290 return cwd[len(rootsep):]
291 else:
291 else:
292 # we're outside the repo. return an absolute path.
292 # we're outside the repo. return an absolute path.
293 return cwd
293 return cwd
294
294
295 def pathto(self, f, cwd=None):
295 def pathto(self, f, cwd=None):
296 if cwd is None:
296 if cwd is None:
297 cwd = self.getcwd()
297 cwd = self.getcwd()
298 path = util.pathto(self._root, cwd, f)
298 path = util.pathto(self._root, cwd, f)
299 if self._slash:
299 if self._slash:
300 return util.pconvert(path)
300 return util.pconvert(path)
301 return path
301 return path
302
302
303 def __getitem__(self, key):
303 def __getitem__(self, key):
304 '''Return the current state of key (a filename) in the dirstate.
304 '''Return the current state of key (a filename) in the dirstate.
305
305
306 States are:
306 States are:
307 n normal
307 n normal
308 m needs merging
308 m needs merging
309 r marked for removal
309 r marked for removal
310 a marked for addition
310 a marked for addition
311 ? not tracked
311 ? not tracked
312 '''
312 '''
313 return self._map.get(key, ("?",))[0]
313 return self._map.get(key, ("?",))[0]
314
314
315 def __contains__(self, key):
315 def __contains__(self, key):
316 return key in self._map
316 return key in self._map
317
317
318 def __iter__(self):
318 def __iter__(self):
319 for x in sorted(self._map):
319 for x in sorted(self._map):
320 yield x
320 yield x
321
321
322 def iteritems(self):
322 def iteritems(self):
323 return self._map.iteritems()
323 return self._map.iteritems()
324
324
325 def parents(self):
325 def parents(self):
326 return [self._validate(p) for p in self._pl]
326 return [self._validate(p) for p in self._pl]
327
327
328 def p1(self):
328 def p1(self):
329 return self._validate(self._pl[0])
329 return self._validate(self._pl[0])
330
330
331 def p2(self):
331 def p2(self):
332 return self._validate(self._pl[1])
332 return self._validate(self._pl[1])
333
333
334 def branch(self):
334 def branch(self):
335 return encoding.tolocal(self._branch)
335 return encoding.tolocal(self._branch)
336
336
337 def setparents(self, p1, p2=nullid):
337 def setparents(self, p1, p2=nullid):
338 """Set dirstate parents to p1 and p2.
338 """Set dirstate parents to p1 and p2.
339
339
340 When moving from two parents to one, 'm' merged entries a
340 When moving from two parents to one, 'm' merged entries a
341 adjusted to normal and previous copy records discarded and
341 adjusted to normal and previous copy records discarded and
342 returned by the call.
342 returned by the call.
343
343
344 See localrepo.setparents()
344 See localrepo.setparents()
345 """
345 """
346 if self._parentwriters == 0:
346 if self._parentwriters == 0:
347 raise ValueError("cannot set dirstate parent without "
347 raise ValueError("cannot set dirstate parent without "
348 "calling dirstate.beginparentchange")
348 "calling dirstate.beginparentchange")
349
349
350 self._dirty = self._dirtypl = True
350 self._dirty = self._dirtypl = True
351 oldp2 = self._pl[1]
351 oldp2 = self._pl[1]
352 self._pl = p1, p2
352 self._pl = p1, p2
353 copies = {}
353 copies = {}
354 if oldp2 != nullid and p2 == nullid:
354 if oldp2 != nullid and p2 == nullid:
355 for f, s in self._map.iteritems():
355 for f, s in self._map.iteritems():
356 # Discard 'm' markers when moving away from a merge state
356 # Discard 'm' markers when moving away from a merge state
357 if s[0] == 'm':
357 if s[0] == 'm':
358 if f in self._copymap:
358 if f in self._copymap:
359 copies[f] = self._copymap[f]
359 copies[f] = self._copymap[f]
360 self.normallookup(f)
360 self.normallookup(f)
361 # Also fix up otherparent markers
361 # Also fix up otherparent markers
362 elif s[0] == 'n' and s[2] == -2:
362 elif s[0] == 'n' and s[2] == -2:
363 if f in self._copymap:
363 if f in self._copymap:
364 copies[f] = self._copymap[f]
364 copies[f] = self._copymap[f]
365 self.add(f)
365 self.add(f)
366 return copies
366 return copies
367
367
368 def setbranch(self, branch):
368 def setbranch(self, branch):
369 self._branch = encoding.fromlocal(branch)
369 self._branch = encoding.fromlocal(branch)
370 f = self._opener('branch', 'w', atomictemp=True)
370 f = self._opener('branch', 'w', atomictemp=True)
371 try:
371 try:
372 f.write(self._branch + '\n')
372 f.write(self._branch + '\n')
373 f.close()
373 f.close()
374
374
375 # make sure filecache has the correct stat info for _branch after
375 # make sure filecache has the correct stat info for _branch after
376 # replacing the underlying file
376 # replacing the underlying file
377 ce = self._filecache['_branch']
377 ce = self._filecache['_branch']
378 if ce:
378 if ce:
379 ce.refresh()
379 ce.refresh()
380 except: # re-raises
380 except: # re-raises
381 f.discard()
381 f.discard()
382 raise
382 raise
383
383
384 def _opendirstatefile(self):
384 def _opendirstatefile(self):
385 fp, mode = _trypending(self._root, self._opener, self._filename)
385 fp, mode = _trypending(self._root, self._opener, self._filename)
386 if self._pendingmode is not None and self._pendingmode != mode:
386 if self._pendingmode is not None and self._pendingmode != mode:
387 fp.close()
387 fp.close()
388 raise error.Abort(_('working directory state may be '
388 raise error.Abort(_('working directory state may be '
389 'changed parallelly'))
389 'changed parallelly'))
390 self._pendingmode = mode
390 self._pendingmode = mode
391 return fp
391 return fp
392
392
393 def _read(self):
393 def _read(self):
394 self._map = {}
394 self._map = {}
395 self._copymap = {}
395 self._copymap = {}
396 try:
396 try:
397 fp = self._opendirstatefile()
397 fp = self._opendirstatefile()
398 try:
398 try:
399 st = fp.read()
399 st = fp.read()
400 finally:
400 finally:
401 fp.close()
401 fp.close()
402 except IOError as err:
402 except IOError as err:
403 if err.errno != errno.ENOENT:
403 if err.errno != errno.ENOENT:
404 raise
404 raise
405 return
405 return
406 if not st:
406 if not st:
407 return
407 return
408
408
409 if util.safehasattr(parsers, 'dict_new_presized'):
409 if util.safehasattr(parsers, 'dict_new_presized'):
410 # Make an estimate of the number of files in the dirstate based on
410 # Make an estimate of the number of files in the dirstate based on
411 # its size. From a linear regression on a set of real-world repos,
411 # its size. From a linear regression on a set of real-world repos,
412 # all over 10,000 files, the size of a dirstate entry is 85
412 # all over 10,000 files, the size of a dirstate entry is 85
413 # bytes. The cost of resizing is significantly higher than the cost
413 # bytes. The cost of resizing is significantly higher than the cost
414 # of filling in a larger presized dict, so subtract 20% from the
414 # of filling in a larger presized dict, so subtract 20% from the
415 # size.
415 # size.
416 #
416 #
417 # This heuristic is imperfect in many ways, so in a future dirstate
417 # This heuristic is imperfect in many ways, so in a future dirstate
418 # format update it makes sense to just record the number of entries
418 # format update it makes sense to just record the number of entries
419 # on write.
419 # on write.
420 self._map = parsers.dict_new_presized(len(st) / 71)
420 self._map = parsers.dict_new_presized(len(st) / 71)
421
421
422 # Python's garbage collector triggers a GC each time a certain number
422 # Python's garbage collector triggers a GC each time a certain number
423 # of container objects (the number being defined by
423 # of container objects (the number being defined by
424 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
424 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
425 # for each file in the dirstate. The C version then immediately marks
425 # for each file in the dirstate. The C version then immediately marks
426 # them as not to be tracked by the collector. However, this has no
426 # them as not to be tracked by the collector. However, this has no
427 # effect on when GCs are triggered, only on what objects the GC looks
427 # effect on when GCs are triggered, only on what objects the GC looks
428 # into. This means that O(number of files) GCs are unavoidable.
428 # into. This means that O(number of files) GCs are unavoidable.
429 # Depending on when in the process's lifetime the dirstate is parsed,
429 # Depending on when in the process's lifetime the dirstate is parsed,
430 # this can get very expensive. As a workaround, disable GC while
430 # this can get very expensive. As a workaround, disable GC while
431 # parsing the dirstate.
431 # parsing the dirstate.
432 #
432 #
433 # (we cannot decorate the function directly since it is in a C module)
433 # (we cannot decorate the function directly since it is in a C module)
434 parse_dirstate = util.nogc(parsers.parse_dirstate)
434 parse_dirstate = util.nogc(parsers.parse_dirstate)
435 p = parse_dirstate(self._map, self._copymap, st)
435 p = parse_dirstate(self._map, self._copymap, st)
436 if not self._dirtypl:
436 if not self._dirtypl:
437 self._pl = p
437 self._pl = p
438
438
439 def invalidate(self):
439 def invalidate(self):
440 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
440 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
441 "_pl", "_dirs", "_ignore", "_nonnormalset"):
441 "_pl", "_dirs", "_ignore", "_nonnormalset"):
442 if a in self.__dict__:
442 if a in self.__dict__:
443 delattr(self, a)
443 delattr(self, a)
444 self._lastnormaltime = 0
444 self._lastnormaltime = 0
445 self._dirty = False
445 self._dirty = False
446 self._parentwriters = 0
446 self._parentwriters = 0
447
447
448 def copy(self, source, dest):
448 def copy(self, source, dest):
449 """Mark dest as a copy of source. Unmark dest if source is None."""
449 """Mark dest as a copy of source. Unmark dest if source is None."""
450 if source == dest:
450 if source == dest:
451 return
451 return
452 self._dirty = True
452 self._dirty = True
453 if source is not None:
453 if source is not None:
454 self._copymap[dest] = source
454 self._copymap[dest] = source
455 elif dest in self._copymap:
455 elif dest in self._copymap:
456 del self._copymap[dest]
456 del self._copymap[dest]
457
457
458 def copied(self, file):
458 def copied(self, file):
459 return self._copymap.get(file, None)
459 return self._copymap.get(file, None)
460
460
461 def copies(self):
461 def copies(self):
462 return self._copymap
462 return self._copymap
463
463
464 def _droppath(self, f):
464 def _droppath(self, f):
465 if self[f] not in "?r" and "_dirs" in self.__dict__:
465 if self[f] not in "?r" and "_dirs" in self.__dict__:
466 self._dirs.delpath(f)
466 self._dirs.delpath(f)
467
467
468 if "_filefoldmap" in self.__dict__:
468 if "_filefoldmap" in self.__dict__:
469 normed = util.normcase(f)
469 normed = util.normcase(f)
470 if normed in self._filefoldmap:
470 if normed in self._filefoldmap:
471 del self._filefoldmap[normed]
471 del self._filefoldmap[normed]
472
472
473 def _addpath(self, f, state, mode, size, mtime):
473 def _addpath(self, f, state, mode, size, mtime):
474 oldstate = self[f]
474 oldstate = self[f]
475 if state == 'a' or oldstate == 'r':
475 if state == 'a' or oldstate == 'r':
476 scmutil.checkfilename(f)
476 scmutil.checkfilename(f)
477 if f in self._dirs:
477 if f in self._dirs:
478 raise error.Abort(_('directory %r already in dirstate') % f)
478 raise error.Abort(_('directory %r already in dirstate') % f)
479 # shadows
479 # shadows
480 for d in util.finddirs(f):
480 for d in util.finddirs(f):
481 if d in self._dirs:
481 if d in self._dirs:
482 break
482 break
483 if d in self._map and self[d] != 'r':
483 if d in self._map and self[d] != 'r':
484 raise error.Abort(
484 raise error.Abort(
485 _('file %r in dirstate clashes with %r') % (d, f))
485 _('file %r in dirstate clashes with %r') % (d, f))
486 if oldstate in "?r" and "_dirs" in self.__dict__:
486 if oldstate in "?r" and "_dirs" in self.__dict__:
487 self._dirs.addpath(f)
487 self._dirs.addpath(f)
488 self._dirty = True
488 self._dirty = True
489 self._map[f] = dirstatetuple(state, mode, size, mtime)
489 self._map[f] = dirstatetuple(state, mode, size, mtime)
490 if state != 'n' or mtime == -1:
490 if state != 'n' or mtime == -1:
491 self._nonnormalset.add(f)
491 self._nonnormalset.add(f)
492
492
493 def normal(self, f):
493 def normal(self, f):
494 '''Mark a file normal and clean.'''
494 '''Mark a file normal and clean.'''
495 s = os.lstat(self._join(f))
495 s = os.lstat(self._join(f))
496 mtime = s.st_mtime
496 mtime = s.st_mtime
497 self._addpath(f, 'n', s.st_mode,
497 self._addpath(f, 'n', s.st_mode,
498 s.st_size & _rangemask, mtime & _rangemask)
498 s.st_size & _rangemask, mtime & _rangemask)
499 if f in self._copymap:
499 if f in self._copymap:
500 del self._copymap[f]
500 del self._copymap[f]
501 if f in self._nonnormalset:
501 if f in self._nonnormalset:
502 self._nonnormalset.remove(f)
502 self._nonnormalset.remove(f)
503 if mtime > self._lastnormaltime:
503 if mtime > self._lastnormaltime:
504 # Remember the most recent modification timeslot for status(),
504 # Remember the most recent modification timeslot for status(),
505 # to make sure we won't miss future size-preserving file content
505 # to make sure we won't miss future size-preserving file content
506 # modifications that happen within the same timeslot.
506 # modifications that happen within the same timeslot.
507 self._lastnormaltime = mtime
507 self._lastnormaltime = mtime
508
508
509 def normallookup(self, f):
509 def normallookup(self, f):
510 '''Mark a file normal, but possibly dirty.'''
510 '''Mark a file normal, but possibly dirty.'''
511 if self._pl[1] != nullid and f in self._map:
511 if self._pl[1] != nullid and f in self._map:
512 # if there is a merge going on and the file was either
512 # if there is a merge going on and the file was either
513 # in state 'm' (-1) or coming from other parent (-2) before
513 # in state 'm' (-1) or coming from other parent (-2) before
514 # being removed, restore that state.
514 # being removed, restore that state.
515 entry = self._map[f]
515 entry = self._map[f]
516 if entry[0] == 'r' and entry[2] in (-1, -2):
516 if entry[0] == 'r' and entry[2] in (-1, -2):
517 source = self._copymap.get(f)
517 source = self._copymap.get(f)
518 if entry[2] == -1:
518 if entry[2] == -1:
519 self.merge(f)
519 self.merge(f)
520 elif entry[2] == -2:
520 elif entry[2] == -2:
521 self.otherparent(f)
521 self.otherparent(f)
522 if source:
522 if source:
523 self.copy(source, f)
523 self.copy(source, f)
524 return
524 return
525 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
525 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
526 return
526 return
527 self._addpath(f, 'n', 0, -1, -1)
527 self._addpath(f, 'n', 0, -1, -1)
528 if f in self._copymap:
528 if f in self._copymap:
529 del self._copymap[f]
529 del self._copymap[f]
530 if f in self._nonnormalset:
530 if f in self._nonnormalset:
531 self._nonnormalset.remove(f)
531 self._nonnormalset.remove(f)
532
532
533 def otherparent(self, f):
533 def otherparent(self, f):
534 '''Mark as coming from the other parent, always dirty.'''
534 '''Mark as coming from the other parent, always dirty.'''
535 if self._pl[1] == nullid:
535 if self._pl[1] == nullid:
536 raise error.Abort(_("setting %r to other parent "
536 raise error.Abort(_("setting %r to other parent "
537 "only allowed in merges") % f)
537 "only allowed in merges") % f)
538 if f in self and self[f] == 'n':
538 if f in self and self[f] == 'n':
539 # merge-like
539 # merge-like
540 self._addpath(f, 'm', 0, -2, -1)
540 self._addpath(f, 'm', 0, -2, -1)
541 else:
541 else:
542 # add-like
542 # add-like
543 self._addpath(f, 'n', 0, -2, -1)
543 self._addpath(f, 'n', 0, -2, -1)
544
544
545 if f in self._copymap:
545 if f in self._copymap:
546 del self._copymap[f]
546 del self._copymap[f]
547
547
548 def add(self, f):
548 def add(self, f):
549 '''Mark a file added.'''
549 '''Mark a file added.'''
550 self._addpath(f, 'a', 0, -1, -1)
550 self._addpath(f, 'a', 0, -1, -1)
551 if f in self._copymap:
551 if f in self._copymap:
552 del self._copymap[f]
552 del self._copymap[f]
553
553
554 def remove(self, f):
554 def remove(self, f):
555 '''Mark a file removed.'''
555 '''Mark a file removed.'''
556 self._dirty = True
556 self._dirty = True
557 self._droppath(f)
557 self._droppath(f)
558 size = 0
558 size = 0
559 if self._pl[1] != nullid and f in self._map:
559 if self._pl[1] != nullid and f in self._map:
560 # backup the previous state
560 # backup the previous state
561 entry = self._map[f]
561 entry = self._map[f]
562 if entry[0] == 'm': # merge
562 if entry[0] == 'm': # merge
563 size = -1
563 size = -1
564 elif entry[0] == 'n' and entry[2] == -2: # other parent
564 elif entry[0] == 'n' and entry[2] == -2: # other parent
565 size = -2
565 size = -2
566 self._map[f] = dirstatetuple('r', 0, size, 0)
566 self._map[f] = dirstatetuple('r', 0, size, 0)
567 self._nonnormalset.add(f)
567 self._nonnormalset.add(f)
568 if size == 0 and f in self._copymap:
568 if size == 0 and f in self._copymap:
569 del self._copymap[f]
569 del self._copymap[f]
570
570
571 def merge(self, f):
571 def merge(self, f):
572 '''Mark a file merged.'''
572 '''Mark a file merged.'''
573 if self._pl[1] == nullid:
573 if self._pl[1] == nullid:
574 return self.normallookup(f)
574 return self.normallookup(f)
575 return self.otherparent(f)
575 return self.otherparent(f)
576
576
577 def drop(self, f):
577 def drop(self, f):
578 '''Drop a file from the dirstate'''
578 '''Drop a file from the dirstate'''
579 if f in self._map:
579 if f in self._map:
580 self._dirty = True
580 self._dirty = True
581 self._droppath(f)
581 self._droppath(f)
582 del self._map[f]
582 del self._map[f]
583 if f in self._nonnormalset:
583 if f in self._nonnormalset:
584 self._nonnormalset.remove(f)
584 self._nonnormalset.remove(f)
585 if f in self._copymap:
585 if f in self._copymap:
586 del self._copymap[f]
586 del self._copymap[f]
587
587
588 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
588 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
589 if exists is None:
589 if exists is None:
590 exists = os.path.lexists(os.path.join(self._root, path))
590 exists = os.path.lexists(os.path.join(self._root, path))
591 if not exists:
591 if not exists:
592 # Maybe a path component exists
592 # Maybe a path component exists
593 if not ignoremissing and '/' in path:
593 if not ignoremissing and '/' in path:
594 d, f = path.rsplit('/', 1)
594 d, f = path.rsplit('/', 1)
595 d = self._normalize(d, False, ignoremissing, None)
595 d = self._normalize(d, False, ignoremissing, None)
596 folded = d + "/" + f
596 folded = d + "/" + f
597 else:
597 else:
598 # No path components, preserve original case
598 # No path components, preserve original case
599 folded = path
599 folded = path
600 else:
600 else:
601 # recursively normalize leading directory components
601 # recursively normalize leading directory components
602 # against dirstate
602 # against dirstate
603 if '/' in normed:
603 if '/' in normed:
604 d, f = normed.rsplit('/', 1)
604 d, f = normed.rsplit('/', 1)
605 d = self._normalize(d, False, ignoremissing, True)
605 d = self._normalize(d, False, ignoremissing, True)
606 r = self._root + "/" + d
606 r = self._root + "/" + d
607 folded = d + "/" + util.fspath(f, r)
607 folded = d + "/" + util.fspath(f, r)
608 else:
608 else:
609 folded = util.fspath(normed, self._root)
609 folded = util.fspath(normed, self._root)
610 storemap[normed] = folded
610 storemap[normed] = folded
611
611
612 return folded
612 return folded
613
613
614 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
614 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
615 normed = util.normcase(path)
615 normed = util.normcase(path)
616 folded = self._filefoldmap.get(normed, None)
616 folded = self._filefoldmap.get(normed, None)
617 if folded is None:
617 if folded is None:
618 if isknown:
618 if isknown:
619 folded = path
619 folded = path
620 else:
620 else:
621 folded = self._discoverpath(path, normed, ignoremissing, exists,
621 folded = self._discoverpath(path, normed, ignoremissing, exists,
622 self._filefoldmap)
622 self._filefoldmap)
623 return folded
623 return folded
624
624
625 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
625 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
626 normed = util.normcase(path)
626 normed = util.normcase(path)
627 folded = self._filefoldmap.get(normed, None)
627 folded = self._filefoldmap.get(normed, None)
628 if folded is None:
628 if folded is None:
629 folded = self._dirfoldmap.get(normed, None)
629 folded = self._dirfoldmap.get(normed, None)
630 if folded is None:
630 if folded is None:
631 if isknown:
631 if isknown:
632 folded = path
632 folded = path
633 else:
633 else:
634 # store discovered result in dirfoldmap so that future
634 # store discovered result in dirfoldmap so that future
635 # normalizefile calls don't start matching directories
635 # normalizefile calls don't start matching directories
636 folded = self._discoverpath(path, normed, ignoremissing, exists,
636 folded = self._discoverpath(path, normed, ignoremissing, exists,
637 self._dirfoldmap)
637 self._dirfoldmap)
638 return folded
638 return folded
639
639
640 def normalize(self, path, isknown=False, ignoremissing=False):
640 def normalize(self, path, isknown=False, ignoremissing=False):
641 '''
641 '''
642 normalize the case of a pathname when on a casefolding filesystem
642 normalize the case of a pathname when on a casefolding filesystem
643
643
644 isknown specifies whether the filename came from walking the
644 isknown specifies whether the filename came from walking the
645 disk, to avoid extra filesystem access.
645 disk, to avoid extra filesystem access.
646
646
647 If ignoremissing is True, missing path are returned
647 If ignoremissing is True, missing path are returned
648 unchanged. Otherwise, we try harder to normalize possibly
648 unchanged. Otherwise, we try harder to normalize possibly
649 existing path components.
649 existing path components.
650
650
651 The normalized case is determined based on the following precedence:
651 The normalized case is determined based on the following precedence:
652
652
653 - version of name already stored in the dirstate
653 - version of name already stored in the dirstate
654 - version of name stored on disk
654 - version of name stored on disk
655 - version provided via command arguments
655 - version provided via command arguments
656 '''
656 '''
657
657
658 if self._checkcase:
658 if self._checkcase:
659 return self._normalize(path, isknown, ignoremissing)
659 return self._normalize(path, isknown, ignoremissing)
660 return path
660 return path
661
661
662 def clear(self):
662 def clear(self):
663 self._map = {}
663 self._map = {}
664 self._nonnormalset = set()
664 self._nonnormalset = set()
665 if "_dirs" in self.__dict__:
665 if "_dirs" in self.__dict__:
666 delattr(self, "_dirs")
666 delattr(self, "_dirs")
667 self._copymap = {}
667 self._copymap = {}
668 self._pl = [nullid, nullid]
668 self._pl = [nullid, nullid]
669 self._lastnormaltime = 0
669 self._lastnormaltime = 0
670 self._dirty = True
670 self._dirty = True
671
671
672 def rebuild(self, parent, allfiles, changedfiles=None):
672 def rebuild(self, parent, allfiles, changedfiles=None):
673 if changedfiles is None:
673 if changedfiles is None:
674 # Rebuild entire dirstate
674 # Rebuild entire dirstate
675 changedfiles = allfiles
675 changedfiles = allfiles
676 lastnormaltime = self._lastnormaltime
676 lastnormaltime = self._lastnormaltime
677 self.clear()
677 self.clear()
678 self._lastnormaltime = lastnormaltime
678 self._lastnormaltime = lastnormaltime
679
679
680 for f in changedfiles:
680 for f in changedfiles:
681 mode = 0o666
681 mode = 0o666
682 if f in allfiles and 'x' in allfiles.flags(f):
682 if f in allfiles and 'x' in allfiles.flags(f):
683 mode = 0o777
683 mode = 0o777
684
684
685 if f in allfiles:
685 if f in allfiles:
686 self._map[f] = dirstatetuple('n', mode, -1, 0)
686 self._map[f] = dirstatetuple('n', mode, -1, 0)
687 else:
687 else:
688 self._map.pop(f, None)
688 self._map.pop(f, None)
689 if f in self._nonnormalset:
689 if f in self._nonnormalset:
690 self._nonnormalset.remove(f)
690 self._nonnormalset.remove(f)
691
691
692 self._pl = (parent, nullid)
692 self._pl = (parent, nullid)
693 self._dirty = True
693 self._dirty = True
694
694
695 def write(self, tr=_token):
695 def write(self, tr=_token):
696 if not self._dirty:
696 if not self._dirty:
697 return
697 return
698
698
699 filename = self._filename
699 filename = self._filename
700 if tr is _token: # not explicitly specified
700 if tr is _token: # not explicitly specified
701 self._ui.deprecwarn('use dirstate.write with '
701 self._ui.deprecwarn('use dirstate.write with '
702 'repo.currenttransaction()',
702 'repo.currenttransaction()',
703 '3.9')
703 '3.9')
704
704
705 if self._opener.lexists(self._pendingfilename):
705 if self._opener.lexists(self._pendingfilename):
706 # if pending file already exists, in-memory changes
706 # if pending file already exists, in-memory changes
707 # should be written into it, because it has priority
707 # should be written into it, because it has priority
708 # to '.hg/dirstate' at reading under HG_PENDING mode
708 # to '.hg/dirstate' at reading under HG_PENDING mode
709 filename = self._pendingfilename
709 filename = self._pendingfilename
710 elif tr:
710 elif tr:
711 # 'dirstate.write()' is not only for writing in-memory
711 # 'dirstate.write()' is not only for writing in-memory
712 # changes out, but also for dropping ambiguous timestamp.
712 # changes out, but also for dropping ambiguous timestamp.
713 # delayed writing re-raise "ambiguous timestamp issue".
713 # delayed writing re-raise "ambiguous timestamp issue".
714 # See also the wiki page below for detail:
714 # See also the wiki page below for detail:
715 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
715 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
716
716
717 # emulate dropping timestamp in 'parsers.pack_dirstate'
717 # emulate dropping timestamp in 'parsers.pack_dirstate'
718 now = _getfsnow(self._opener)
718 now = _getfsnow(self._opener)
719 dmap = self._map
719 dmap = self._map
720 for f, e in dmap.iteritems():
720 for f, e in dmap.iteritems():
721 if e[0] == 'n' and e[3] == now:
721 if e[0] == 'n' and e[3] == now:
722 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
722 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
723 self._nonnormalset.add(f)
723 self._nonnormalset.add(f)
724
724
725 # emulate that all 'dirstate.normal' results are written out
725 # emulate that all 'dirstate.normal' results are written out
726 self._lastnormaltime = 0
726 self._lastnormaltime = 0
727
727
728 # delay writing in-memory changes out
728 # delay writing in-memory changes out
729 tr.addfilegenerator('dirstate', (self._filename,),
729 tr.addfilegenerator('dirstate', (self._filename,),
730 self._writedirstate, location='plain')
730 self._writedirstate, location='plain')
731 return
731 return
732
732
733 st = self._opener(filename, "w", atomictemp=True)
733 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
734 self._writedirstate(st)
734 self._writedirstate(st)
735
735
736 def _writedirstate(self, st):
736 def _writedirstate(self, st):
737 # use the modification time of the newly created temporary file as the
737 # use the modification time of the newly created temporary file as the
738 # filesystem's notion of 'now'
738 # filesystem's notion of 'now'
739 now = util.fstat(st).st_mtime & _rangemask
739 now = util.fstat(st).st_mtime & _rangemask
740
740
741 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
741 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
742 # timestamp of each entries in dirstate, because of 'now > mtime'
742 # timestamp of each entries in dirstate, because of 'now > mtime'
743 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
743 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
744 if delaywrite > 0:
744 if delaywrite > 0:
745 # do we have any files to delay for?
745 # do we have any files to delay for?
746 for f, e in self._map.iteritems():
746 for f, e in self._map.iteritems():
747 if e[0] == 'n' and e[3] == now:
747 if e[0] == 'n' and e[3] == now:
748 import time # to avoid useless import
748 import time # to avoid useless import
749 # rather than sleep n seconds, sleep until the next
749 # rather than sleep n seconds, sleep until the next
750 # multiple of n seconds
750 # multiple of n seconds
751 clock = time.time()
751 clock = time.time()
752 start = int(clock) - (int(clock) % delaywrite)
752 start = int(clock) - (int(clock) % delaywrite)
753 end = start + delaywrite
753 end = start + delaywrite
754 time.sleep(end - clock)
754 time.sleep(end - clock)
755 break
755 break
756
756
757 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
757 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
758 self._nonnormalset = nonnormalentries(self._map)
758 self._nonnormalset = nonnormalentries(self._map)
759 st.close()
759 st.close()
760 self._lastnormaltime = 0
760 self._lastnormaltime = 0
761 self._dirty = self._dirtypl = False
761 self._dirty = self._dirtypl = False
762
762
763 def _dirignore(self, f):
763 def _dirignore(self, f):
764 if f == '.':
764 if f == '.':
765 return False
765 return False
766 if self._ignore(f):
766 if self._ignore(f):
767 return True
767 return True
768 for p in util.finddirs(f):
768 for p in util.finddirs(f):
769 if self._ignore(p):
769 if self._ignore(p):
770 return True
770 return True
771 return False
771 return False
772
772
773 def _ignorefiles(self):
773 def _ignorefiles(self):
774 files = []
774 files = []
775 if os.path.exists(self._join('.hgignore')):
775 if os.path.exists(self._join('.hgignore')):
776 files.append(self._join('.hgignore'))
776 files.append(self._join('.hgignore'))
777 for name, path in self._ui.configitems("ui"):
777 for name, path in self._ui.configitems("ui"):
778 if name == 'ignore' or name.startswith('ignore.'):
778 if name == 'ignore' or name.startswith('ignore.'):
779 # we need to use os.path.join here rather than self._join
779 # we need to use os.path.join here rather than self._join
780 # because path is arbitrary and user-specified
780 # because path is arbitrary and user-specified
781 files.append(os.path.join(self._rootdir, util.expandpath(path)))
781 files.append(os.path.join(self._rootdir, util.expandpath(path)))
782 return files
782 return files
783
783
784 def _ignorefileandline(self, f):
784 def _ignorefileandline(self, f):
785 files = collections.deque(self._ignorefiles())
785 files = collections.deque(self._ignorefiles())
786 visited = set()
786 visited = set()
787 while files:
787 while files:
788 i = files.popleft()
788 i = files.popleft()
789 patterns = matchmod.readpatternfile(i, self._ui.warn,
789 patterns = matchmod.readpatternfile(i, self._ui.warn,
790 sourceinfo=True)
790 sourceinfo=True)
791 for pattern, lineno, line in patterns:
791 for pattern, lineno, line in patterns:
792 kind, p = matchmod._patsplit(pattern, 'glob')
792 kind, p = matchmod._patsplit(pattern, 'glob')
793 if kind == "subinclude":
793 if kind == "subinclude":
794 if p not in visited:
794 if p not in visited:
795 files.append(p)
795 files.append(p)
796 continue
796 continue
797 m = matchmod.match(self._root, '', [], [pattern],
797 m = matchmod.match(self._root, '', [], [pattern],
798 warn=self._ui.warn)
798 warn=self._ui.warn)
799 if m(f):
799 if m(f):
800 return (i, lineno, line)
800 return (i, lineno, line)
801 visited.add(i)
801 visited.add(i)
802 return (None, -1, "")
802 return (None, -1, "")
803
803
804 def _walkexplicit(self, match, subrepos):
804 def _walkexplicit(self, match, subrepos):
805 '''Get stat data about the files explicitly specified by match.
805 '''Get stat data about the files explicitly specified by match.
806
806
807 Return a triple (results, dirsfound, dirsnotfound).
807 Return a triple (results, dirsfound, dirsnotfound).
808 - results is a mapping from filename to stat result. It also contains
808 - results is a mapping from filename to stat result. It also contains
809 listings mapping subrepos and .hg to None.
809 listings mapping subrepos and .hg to None.
810 - dirsfound is a list of files found to be directories.
810 - dirsfound is a list of files found to be directories.
811 - dirsnotfound is a list of files that the dirstate thinks are
811 - dirsnotfound is a list of files that the dirstate thinks are
812 directories and that were not found.'''
812 directories and that were not found.'''
813
813
814 def badtype(mode):
814 def badtype(mode):
815 kind = _('unknown')
815 kind = _('unknown')
816 if stat.S_ISCHR(mode):
816 if stat.S_ISCHR(mode):
817 kind = _('character device')
817 kind = _('character device')
818 elif stat.S_ISBLK(mode):
818 elif stat.S_ISBLK(mode):
819 kind = _('block device')
819 kind = _('block device')
820 elif stat.S_ISFIFO(mode):
820 elif stat.S_ISFIFO(mode):
821 kind = _('fifo')
821 kind = _('fifo')
822 elif stat.S_ISSOCK(mode):
822 elif stat.S_ISSOCK(mode):
823 kind = _('socket')
823 kind = _('socket')
824 elif stat.S_ISDIR(mode):
824 elif stat.S_ISDIR(mode):
825 kind = _('directory')
825 kind = _('directory')
826 return _('unsupported file type (type is %s)') % kind
826 return _('unsupported file type (type is %s)') % kind
827
827
828 matchedir = match.explicitdir
828 matchedir = match.explicitdir
829 badfn = match.bad
829 badfn = match.bad
830 dmap = self._map
830 dmap = self._map
831 lstat = os.lstat
831 lstat = os.lstat
832 getkind = stat.S_IFMT
832 getkind = stat.S_IFMT
833 dirkind = stat.S_IFDIR
833 dirkind = stat.S_IFDIR
834 regkind = stat.S_IFREG
834 regkind = stat.S_IFREG
835 lnkkind = stat.S_IFLNK
835 lnkkind = stat.S_IFLNK
836 join = self._join
836 join = self._join
837 dirsfound = []
837 dirsfound = []
838 foundadd = dirsfound.append
838 foundadd = dirsfound.append
839 dirsnotfound = []
839 dirsnotfound = []
840 notfoundadd = dirsnotfound.append
840 notfoundadd = dirsnotfound.append
841
841
842 if not match.isexact() and self._checkcase:
842 if not match.isexact() and self._checkcase:
843 normalize = self._normalize
843 normalize = self._normalize
844 else:
844 else:
845 normalize = None
845 normalize = None
846
846
847 files = sorted(match.files())
847 files = sorted(match.files())
848 subrepos.sort()
848 subrepos.sort()
849 i, j = 0, 0
849 i, j = 0, 0
850 while i < len(files) and j < len(subrepos):
850 while i < len(files) and j < len(subrepos):
851 subpath = subrepos[j] + "/"
851 subpath = subrepos[j] + "/"
852 if files[i] < subpath:
852 if files[i] < subpath:
853 i += 1
853 i += 1
854 continue
854 continue
855 while i < len(files) and files[i].startswith(subpath):
855 while i < len(files) and files[i].startswith(subpath):
856 del files[i]
856 del files[i]
857 j += 1
857 j += 1
858
858
859 if not files or '.' in files:
859 if not files or '.' in files:
860 files = ['.']
860 files = ['.']
861 results = dict.fromkeys(subrepos)
861 results = dict.fromkeys(subrepos)
862 results['.hg'] = None
862 results['.hg'] = None
863
863
864 alldirs = None
864 alldirs = None
865 for ff in files:
865 for ff in files:
866 # constructing the foldmap is expensive, so don't do it for the
866 # constructing the foldmap is expensive, so don't do it for the
867 # common case where files is ['.']
867 # common case where files is ['.']
868 if normalize and ff != '.':
868 if normalize and ff != '.':
869 nf = normalize(ff, False, True)
869 nf = normalize(ff, False, True)
870 else:
870 else:
871 nf = ff
871 nf = ff
872 if nf in results:
872 if nf in results:
873 continue
873 continue
874
874
875 try:
875 try:
876 st = lstat(join(nf))
876 st = lstat(join(nf))
877 kind = getkind(st.st_mode)
877 kind = getkind(st.st_mode)
878 if kind == dirkind:
878 if kind == dirkind:
879 if nf in dmap:
879 if nf in dmap:
880 # file replaced by dir on disk but still in dirstate
880 # file replaced by dir on disk but still in dirstate
881 results[nf] = None
881 results[nf] = None
882 if matchedir:
882 if matchedir:
883 matchedir(nf)
883 matchedir(nf)
884 foundadd((nf, ff))
884 foundadd((nf, ff))
885 elif kind == regkind or kind == lnkkind:
885 elif kind == regkind or kind == lnkkind:
886 results[nf] = st
886 results[nf] = st
887 else:
887 else:
888 badfn(ff, badtype(kind))
888 badfn(ff, badtype(kind))
889 if nf in dmap:
889 if nf in dmap:
890 results[nf] = None
890 results[nf] = None
891 except OSError as inst: # nf not found on disk - it is dirstate only
891 except OSError as inst: # nf not found on disk - it is dirstate only
892 if nf in dmap: # does it exactly match a missing file?
892 if nf in dmap: # does it exactly match a missing file?
893 results[nf] = None
893 results[nf] = None
894 else: # does it match a missing directory?
894 else: # does it match a missing directory?
895 if alldirs is None:
895 if alldirs is None:
896 alldirs = util.dirs(dmap)
896 alldirs = util.dirs(dmap)
897 if nf in alldirs:
897 if nf in alldirs:
898 if matchedir:
898 if matchedir:
899 matchedir(nf)
899 matchedir(nf)
900 notfoundadd(nf)
900 notfoundadd(nf)
901 else:
901 else:
902 badfn(ff, inst.strerror)
902 badfn(ff, inst.strerror)
903
903
904 # Case insensitive filesystems cannot rely on lstat() failing to detect
904 # Case insensitive filesystems cannot rely on lstat() failing to detect
905 # a case-only rename. Prune the stat object for any file that does not
905 # a case-only rename. Prune the stat object for any file that does not
906 # match the case in the filesystem, if there are multiple files that
906 # match the case in the filesystem, if there are multiple files that
907 # normalize to the same path.
907 # normalize to the same path.
908 if match.isexact() and self._checkcase:
908 if match.isexact() and self._checkcase:
909 normed = {}
909 normed = {}
910
910
911 for f, st in results.iteritems():
911 for f, st in results.iteritems():
912 if st is None:
912 if st is None:
913 continue
913 continue
914
914
915 nc = util.normcase(f)
915 nc = util.normcase(f)
916 paths = normed.get(nc)
916 paths = normed.get(nc)
917
917
918 if paths is None:
918 if paths is None:
919 paths = set()
919 paths = set()
920 normed[nc] = paths
920 normed[nc] = paths
921
921
922 paths.add(f)
922 paths.add(f)
923
923
924 for norm, paths in normed.iteritems():
924 for norm, paths in normed.iteritems():
925 if len(paths) > 1:
925 if len(paths) > 1:
926 for path in paths:
926 for path in paths:
927 folded = self._discoverpath(path, norm, True, None,
927 folded = self._discoverpath(path, norm, True, None,
928 self._dirfoldmap)
928 self._dirfoldmap)
929 if path != folded:
929 if path != folded:
930 results[path] = None
930 results[path] = None
931
931
932 return results, dirsfound, dirsnotfound
932 return results, dirsfound, dirsnotfound
933
933
934 def walk(self, match, subrepos, unknown, ignored, full=True):
934 def walk(self, match, subrepos, unknown, ignored, full=True):
935 '''
935 '''
936 Walk recursively through the directory tree, finding all files
936 Walk recursively through the directory tree, finding all files
937 matched by match.
937 matched by match.
938
938
939 If full is False, maybe skip some known-clean files.
939 If full is False, maybe skip some known-clean files.
940
940
941 Return a dict mapping filename to stat-like object (either
941 Return a dict mapping filename to stat-like object (either
942 mercurial.osutil.stat instance or return value of os.stat()).
942 mercurial.osutil.stat instance or return value of os.stat()).
943
943
944 '''
944 '''
945 # full is a flag that extensions that hook into walk can use -- this
945 # full is a flag that extensions that hook into walk can use -- this
946 # implementation doesn't use it at all. This satisfies the contract
946 # implementation doesn't use it at all. This satisfies the contract
947 # because we only guarantee a "maybe".
947 # because we only guarantee a "maybe".
948
948
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = osutil.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
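        # traverse() is an iterative depth-first scan that uses 'work' as a
        # stack. For non-root directories it passes skip='.hg' to
        # osutil.listdir so that subdirectories containing a '.hg' entry
        # (nested repositories) are pruned from the scan.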
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                nd = work.pop()
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd), inst.strerror)
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that traverse() doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn, b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = dmap.keys()
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was
                # stat'ed and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory,
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
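                # util.statfiles returns one stat result (or None) per path,
                # in the same order as 'visit', so each result is paired with
                # the next name from the bound 'nf' iterator below.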
                nf = iter(visit).next
                for st in util.statfiles([join(i) for i in visit]):
                    results[nf()] = st
        return results

    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        ladd = lookup.append # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

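            # For a normal ('n') entry: a changed size or exec bit means
            # modified; an mtime mismatch, or an mtime equal to the last
            # normalized write time, only means "unsure" (lookup); size -2
            # marks an entry whose content comes from the other merge parent,
            # and files recorded in the copy map are always reported as
            # modified. Size and mtime are also compared after masking with
            # _rangemask because the dirstate stores them truncated to 31 bits.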
            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

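        # Note (added comment): callers typically re-check the 'lookup'
        # (unsure) entries by comparing file contents against the parent
        # revision before deciding whether they are really modified or clean.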
        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))

    def matches(self, match):
        '''
        return files in the dirstate (in whatever state) filtered by match
        '''
        dmap = self._map
        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files is
            # much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just return
            # that
            return list(files)
        return [f for f in dmap if match(f)]

    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, suffix='', prefix=''):
        '''Save current dirstate into a backup file named with prefix/suffix'''
        assert len(suffix) > 0 or len(prefix) > 0
        filename = self._actualfilename(tr)

        # use '_writedirstate' instead of 'write' to make sure that changes
        # are written out unconditionally, because the latter skips writing
        # while a transaction is running. The output file is then used to
        # create the backup of the dirstate at this point.
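        # checkambig=True asks the opener to avoid file stat ambiguity for
        # the rewritten file (same size and mtime within the same second),
        # presumably so that consumers relying on stat information can tell
        # the new content apart from the old.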
        self._writedirstate(self._opener(filename, "w", atomictemp=True,
                                         checkambig=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that the pending file written above is unlinked on
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        self._opener.write(prefix + self._filename + suffix,
                           self._opener.tryread(filename))

    def restorebackup(self, tr, suffix='', prefix=''):
        '''Restore dirstate from the backup file named with prefix/suffix'''
        assert len(suffix) > 0 or len(prefix) > 0
        # this "invalidate()" prevents "wlock.release()" from writing out
        # dirstate changes after restoring from the backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        # using self._filename to avoid having "pending" in the backup filename
        self._opener.rename(prefix + self._filename + suffix, filename)

    def clearbackup(self, tr, suffix='', prefix=''):
        '''Clear the backup file named with prefix/suffix'''
        assert len(suffix) > 0 or len(prefix) > 0
        # using self._filename to avoid having "pending" in the backup filename
        self._opener.unlink(prefix + self._filename + suffix)
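    # Note (added comment): the three backup helpers above are meant to be
    # used together: a caller (for example the dirstateguard helper) invokes
    # savebackup() before a risky operation, then restorebackup() to roll
    # back on failure or clearbackup() to drop the backup once the operation
    # has succeeded.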