devel: use the new 'config' argument for the dirstate develwarn
Pierre-Yves David
r29097:ff4cc443 default
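The change itself is small: the develwarn call in dirstate.write() stops checking the devel.all-warnings / devel.check-dirstate-write options by hand and instead passes the new 'config' argument, which is expected to perform the same devel.* gating inside ui.develwarn. Restated from the hunk in dirstate.write() below (old code first, then the replacement):

    # old: warning gated by explicit configbool checks
    if (self._ui.configbool('devel', 'all-warnings')
        or self._ui.configbool('devel', 'check-dirstate-write')):
        self._ui.develwarn('use dirstate.write with '
                           'repo.currenttransaction()')

    # new: gating delegated to ui.develwarn via its 'config' argument
    self._ui.develwarn('use dirstate.write with '
                       'repo.currenttransaction()',
                       config='check-dirstate-write')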
@@ -1,1243 +1,1242 @@
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import errno
11 import errno
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import nullid
16 from .node import nullid
17 from . import (
17 from . import (
18 encoding,
18 encoding,
19 error,
19 error,
20 match as matchmod,
20 match as matchmod,
21 osutil,
21 osutil,
22 parsers,
22 parsers,
23 pathutil,
23 pathutil,
24 scmutil,
24 scmutil,
25 util,
25 util,
26 )
26 )
27
27
28 propertycache = util.propertycache
28 propertycache = util.propertycache
29 filecache = scmutil.filecache
29 filecache = scmutil.filecache
30 _rangemask = 0x7fffffff
30 _rangemask = 0x7fffffff
31
31
32 dirstatetuple = parsers.dirstatetuple
32 dirstatetuple = parsers.dirstatetuple
33
33
34 class repocache(filecache):
34 class repocache(filecache):
35 """filecache for files in .hg/"""
35 """filecache for files in .hg/"""
36 def join(self, obj, fname):
36 def join(self, obj, fname):
37 return obj._opener.join(fname)
37 return obj._opener.join(fname)
38
38
39 class rootcache(filecache):
39 class rootcache(filecache):
40 """filecache for files in the repository root"""
40 """filecache for files in the repository root"""
41 def join(self, obj, fname):
41 def join(self, obj, fname):
42 return obj._join(fname)
42 return obj._join(fname)
43
43
44 def _getfsnow(vfs):
44 def _getfsnow(vfs):
45 '''Get "now" timestamp on filesystem'''
45 '''Get "now" timestamp on filesystem'''
46 tmpfd, tmpname = vfs.mkstemp()
46 tmpfd, tmpname = vfs.mkstemp()
47 try:
47 try:
48 return os.fstat(tmpfd).st_mtime
48 return os.fstat(tmpfd).st_mtime
49 finally:
49 finally:
50 os.close(tmpfd)
50 os.close(tmpfd)
51 vfs.unlink(tmpname)
51 vfs.unlink(tmpname)
52
52
53 def nonnormalentries(dmap):
53 def nonnormalentries(dmap):
54 '''Compute the nonnormal dirstate entries from the dmap'''
54 '''Compute the nonnormal dirstate entries from the dmap'''
55 try:
55 try:
56 return parsers.nonnormalentries(dmap)
56 return parsers.nonnormalentries(dmap)
57 except AttributeError:
57 except AttributeError:
58 return set(fname for fname, e in dmap.iteritems()
58 return set(fname for fname, e in dmap.iteritems()
59 if e[0] != 'n' or e[3] == -1)
59 if e[0] != 'n' or e[3] == -1)
60
60
61 def _trypending(root, vfs, filename):
61 def _trypending(root, vfs, filename):
62 '''Open file to be read according to HG_PENDING environment variable
62 '''Open file to be read according to HG_PENDING environment variable
63
63
64 This opens '.pending' of specified 'filename' only when HG_PENDING
64 This opens '.pending' of specified 'filename' only when HG_PENDING
65 is equal to 'root'.
65 is equal to 'root'.
66
66
67 This returns '(fp, is_pending_opened)' tuple.
67 This returns '(fp, is_pending_opened)' tuple.
68 '''
68 '''
69 if root == os.environ.get('HG_PENDING'):
69 if root == os.environ.get('HG_PENDING'):
70 try:
70 try:
71 return (vfs('%s.pending' % filename), True)
71 return (vfs('%s.pending' % filename), True)
72 except IOError as inst:
72 except IOError as inst:
73 if inst.errno != errno.ENOENT:
73 if inst.errno != errno.ENOENT:
74 raise
74 raise
75 return (vfs(filename), False)
75 return (vfs(filename), False)
76
76
77 class dirstate(object):
77 class dirstate(object):
78
78
79 def __init__(self, opener, ui, root, validate):
79 def __init__(self, opener, ui, root, validate):
80 '''Create a new dirstate object.
80 '''Create a new dirstate object.
81
81
82 opener is an open()-like callable that can be used to open the
82 opener is an open()-like callable that can be used to open the
83 dirstate file; root is the root of the directory tracked by
83 dirstate file; root is the root of the directory tracked by
84 the dirstate.
84 the dirstate.
85 '''
85 '''
86 self._opener = opener
86 self._opener = opener
87 self._validate = validate
87 self._validate = validate
88 self._root = root
88 self._root = root
89 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
89 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
90 # UNC path pointing to root share (issue4557)
90 # UNC path pointing to root share (issue4557)
91 self._rootdir = pathutil.normasprefix(root)
91 self._rootdir = pathutil.normasprefix(root)
92 # internal config: ui.forcecwd
92 # internal config: ui.forcecwd
93 forcecwd = ui.config('ui', 'forcecwd')
93 forcecwd = ui.config('ui', 'forcecwd')
94 if forcecwd:
94 if forcecwd:
95 self._cwd = forcecwd
95 self._cwd = forcecwd
96 self._dirty = False
96 self._dirty = False
97 self._dirtypl = False
97 self._dirtypl = False
98 self._lastnormaltime = 0
98 self._lastnormaltime = 0
99 self._ui = ui
99 self._ui = ui
100 self._filecache = {}
100 self._filecache = {}
101 self._parentwriters = 0
101 self._parentwriters = 0
102 self._filename = 'dirstate'
102 self._filename = 'dirstate'
103 self._pendingfilename = '%s.pending' % self._filename
103 self._pendingfilename = '%s.pending' % self._filename
104
104
105 # for consistent view between _pl() and _read() invocations
105 # for consistent view between _pl() and _read() invocations
106 self._pendingmode = None
106 self._pendingmode = None
107
107
108 def beginparentchange(self):
108 def beginparentchange(self):
109 '''Marks the beginning of a set of changes that involve changing
109 '''Marks the beginning of a set of changes that involve changing
110 the dirstate parents. If there is an exception during this time,
110 the dirstate parents. If there is an exception during this time,
111 the dirstate will not be written when the wlock is released. This
111 the dirstate will not be written when the wlock is released. This
112 prevents writing an incoherent dirstate where the parent doesn't
112 prevents writing an incoherent dirstate where the parent doesn't
113 match the contents.
113 match the contents.
114 '''
114 '''
115 self._parentwriters += 1
115 self._parentwriters += 1
116
116
117 def endparentchange(self):
117 def endparentchange(self):
118 '''Marks the end of a set of changes that involve changing the
118 '''Marks the end of a set of changes that involve changing the
119 dirstate parents. Once all parent changes have been marked done,
119 dirstate parents. Once all parent changes have been marked done,
120 the wlock will be free to write the dirstate on release.
120 the wlock will be free to write the dirstate on release.
121 '''
121 '''
122 if self._parentwriters > 0:
122 if self._parentwriters > 0:
123 self._parentwriters -= 1
123 self._parentwriters -= 1
124
124
125 def pendingparentchange(self):
125 def pendingparentchange(self):
126 '''Returns true if the dirstate is in the middle of a set of changes
126 '''Returns true if the dirstate is in the middle of a set of changes
127 that modify the dirstate parent.
127 that modify the dirstate parent.
128 '''
128 '''
129 return self._parentwriters > 0
129 return self._parentwriters > 0
130
130
131 @propertycache
131 @propertycache
132 def _map(self):
132 def _map(self):
133 '''Return the dirstate contents as a map from filename to
133 '''Return the dirstate contents as a map from filename to
134 (state, mode, size, time).'''
134 (state, mode, size, time).'''
135 self._read()
135 self._read()
136 return self._map
136 return self._map
137
137
138 @propertycache
138 @propertycache
139 def _copymap(self):
139 def _copymap(self):
140 self._read()
140 self._read()
141 return self._copymap
141 return self._copymap
142
142
143 @propertycache
143 @propertycache
144 def _nonnormalset(self):
144 def _nonnormalset(self):
145 return nonnormalentries(self._map)
145 return nonnormalentries(self._map)
146
146
147 @propertycache
147 @propertycache
148 def _filefoldmap(self):
148 def _filefoldmap(self):
149 try:
149 try:
150 makefilefoldmap = parsers.make_file_foldmap
150 makefilefoldmap = parsers.make_file_foldmap
151 except AttributeError:
151 except AttributeError:
152 pass
152 pass
153 else:
153 else:
154 return makefilefoldmap(self._map, util.normcasespec,
154 return makefilefoldmap(self._map, util.normcasespec,
155 util.normcasefallback)
155 util.normcasefallback)
156
156
157 f = {}
157 f = {}
158 normcase = util.normcase
158 normcase = util.normcase
159 for name, s in self._map.iteritems():
159 for name, s in self._map.iteritems():
160 if s[0] != 'r':
160 if s[0] != 'r':
161 f[normcase(name)] = name
161 f[normcase(name)] = name
162 f['.'] = '.' # prevents useless util.fspath() invocation
162 f['.'] = '.' # prevents useless util.fspath() invocation
163 return f
163 return f
164
164
165 @propertycache
165 @propertycache
166 def _dirfoldmap(self):
166 def _dirfoldmap(self):
167 f = {}
167 f = {}
168 normcase = util.normcase
168 normcase = util.normcase
169 for name in self._dirs:
169 for name in self._dirs:
170 f[normcase(name)] = name
170 f[normcase(name)] = name
171 return f
171 return f
172
172
173 @repocache('branch')
173 @repocache('branch')
174 def _branch(self):
174 def _branch(self):
175 try:
175 try:
176 return self._opener.read("branch").strip() or "default"
176 return self._opener.read("branch").strip() or "default"
177 except IOError as inst:
177 except IOError as inst:
178 if inst.errno != errno.ENOENT:
178 if inst.errno != errno.ENOENT:
179 raise
179 raise
180 return "default"
180 return "default"
181
181
182 @propertycache
182 @propertycache
183 def _pl(self):
183 def _pl(self):
184 try:
184 try:
185 fp = self._opendirstatefile()
185 fp = self._opendirstatefile()
186 st = fp.read(40)
186 st = fp.read(40)
187 fp.close()
187 fp.close()
188 l = len(st)
188 l = len(st)
189 if l == 40:
189 if l == 40:
190 return st[:20], st[20:40]
190 return st[:20], st[20:40]
191 elif l > 0 and l < 40:
191 elif l > 0 and l < 40:
192 raise error.Abort(_('working directory state appears damaged!'))
192 raise error.Abort(_('working directory state appears damaged!'))
193 except IOError as err:
193 except IOError as err:
194 if err.errno != errno.ENOENT:
194 if err.errno != errno.ENOENT:
195 raise
195 raise
196 return [nullid, nullid]
196 return [nullid, nullid]
197
197
198 @propertycache
198 @propertycache
199 def _dirs(self):
199 def _dirs(self):
200 return util.dirs(self._map, 'r')
200 return util.dirs(self._map, 'r')
201
201
202 def dirs(self):
202 def dirs(self):
203 return self._dirs
203 return self._dirs
204
204
205 @rootcache('.hgignore')
205 @rootcache('.hgignore')
206 def _ignore(self):
206 def _ignore(self):
207 files = self._ignorefiles()
207 files = self._ignorefiles()
208 if not files:
208 if not files:
209 return util.never
209 return util.never
210
210
211 pats = ['include:%s' % f for f in files]
211 pats = ['include:%s' % f for f in files]
212 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
212 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
213
213
214 @propertycache
214 @propertycache
215 def _slash(self):
215 def _slash(self):
216 return self._ui.configbool('ui', 'slash') and os.sep != '/'
216 return self._ui.configbool('ui', 'slash') and os.sep != '/'
217
217
218 @propertycache
218 @propertycache
219 def _checklink(self):
219 def _checklink(self):
220 return util.checklink(self._root)
220 return util.checklink(self._root)
221
221
222 @propertycache
222 @propertycache
223 def _checkexec(self):
223 def _checkexec(self):
224 return util.checkexec(self._root)
224 return util.checkexec(self._root)
225
225
226 @propertycache
226 @propertycache
227 def _checkcase(self):
227 def _checkcase(self):
228 return not util.checkcase(self._join('.hg'))
228 return not util.checkcase(self._join('.hg'))
229
229
230 def _join(self, f):
230 def _join(self, f):
231 # much faster than os.path.join()
231 # much faster than os.path.join()
232 # it's safe because f is always a relative path
232 # it's safe because f is always a relative path
233 return self._rootdir + f
233 return self._rootdir + f
234
234
235 def flagfunc(self, buildfallback):
235 def flagfunc(self, buildfallback):
236 if self._checklink and self._checkexec:
236 if self._checklink and self._checkexec:
237 def f(x):
237 def f(x):
238 try:
238 try:
239 st = os.lstat(self._join(x))
239 st = os.lstat(self._join(x))
240 if util.statislink(st):
240 if util.statislink(st):
241 return 'l'
241 return 'l'
242 if util.statisexec(st):
242 if util.statisexec(st):
243 return 'x'
243 return 'x'
244 except OSError:
244 except OSError:
245 pass
245 pass
246 return ''
246 return ''
247 return f
247 return f
248
248
249 fallback = buildfallback()
249 fallback = buildfallback()
250 if self._checklink:
250 if self._checklink:
251 def f(x):
251 def f(x):
252 if os.path.islink(self._join(x)):
252 if os.path.islink(self._join(x)):
253 return 'l'
253 return 'l'
254 if 'x' in fallback(x):
254 if 'x' in fallback(x):
255 return 'x'
255 return 'x'
256 return ''
256 return ''
257 return f
257 return f
258 if self._checkexec:
258 if self._checkexec:
259 def f(x):
259 def f(x):
260 if 'l' in fallback(x):
260 if 'l' in fallback(x):
261 return 'l'
261 return 'l'
262 if util.isexec(self._join(x)):
262 if util.isexec(self._join(x)):
263 return 'x'
263 return 'x'
264 return ''
264 return ''
265 return f
265 return f
266 else:
266 else:
267 return fallback
267 return fallback
268
268
269 @propertycache
269 @propertycache
270 def _cwd(self):
270 def _cwd(self):
271 return os.getcwd()
271 return os.getcwd()
272
272
273 def getcwd(self):
273 def getcwd(self):
274 '''Return the path from which a canonical path is calculated.
274 '''Return the path from which a canonical path is calculated.
275
275
276 This path should be used to resolve file patterns or to convert
276 This path should be used to resolve file patterns or to convert
277 canonical paths back to file paths for display. It shouldn't be
277 canonical paths back to file paths for display. It shouldn't be
278 used to get real file paths. Use vfs functions instead.
278 used to get real file paths. Use vfs functions instead.
279 '''
279 '''
280 cwd = self._cwd
280 cwd = self._cwd
281 if cwd == self._root:
281 if cwd == self._root:
282 return ''
282 return ''
283 # self._root ends with a path separator if self._root is '/' or 'C:\'
283 # self._root ends with a path separator if self._root is '/' or 'C:\'
284 rootsep = self._root
284 rootsep = self._root
285 if not util.endswithsep(rootsep):
285 if not util.endswithsep(rootsep):
286 rootsep += os.sep
286 rootsep += os.sep
287 if cwd.startswith(rootsep):
287 if cwd.startswith(rootsep):
288 return cwd[len(rootsep):]
288 return cwd[len(rootsep):]
289 else:
289 else:
290 # we're outside the repo. return an absolute path.
290 # we're outside the repo. return an absolute path.
291 return cwd
291 return cwd
292
292
293 def pathto(self, f, cwd=None):
293 def pathto(self, f, cwd=None):
294 if cwd is None:
294 if cwd is None:
295 cwd = self.getcwd()
295 cwd = self.getcwd()
296 path = util.pathto(self._root, cwd, f)
296 path = util.pathto(self._root, cwd, f)
297 if self._slash:
297 if self._slash:
298 return util.pconvert(path)
298 return util.pconvert(path)
299 return path
299 return path
300
300
301 def __getitem__(self, key):
301 def __getitem__(self, key):
302 '''Return the current state of key (a filename) in the dirstate.
302 '''Return the current state of key (a filename) in the dirstate.
303
303
304 States are:
304 States are:
305 n normal
305 n normal
306 m needs merging
306 m needs merging
307 r marked for removal
307 r marked for removal
308 a marked for addition
308 a marked for addition
309 ? not tracked
309 ? not tracked
310 '''
310 '''
311 return self._map.get(key, ("?",))[0]
311 return self._map.get(key, ("?",))[0]
312
312
313 def __contains__(self, key):
313 def __contains__(self, key):
314 return key in self._map
314 return key in self._map
315
315
316 def __iter__(self):
316 def __iter__(self):
317 for x in sorted(self._map):
317 for x in sorted(self._map):
318 yield x
318 yield x
319
319
320 def iteritems(self):
320 def iteritems(self):
321 return self._map.iteritems()
321 return self._map.iteritems()
322
322
323 def parents(self):
323 def parents(self):
324 return [self._validate(p) for p in self._pl]
324 return [self._validate(p) for p in self._pl]
325
325
326 def p1(self):
326 def p1(self):
327 return self._validate(self._pl[0])
327 return self._validate(self._pl[0])
328
328
329 def p2(self):
329 def p2(self):
330 return self._validate(self._pl[1])
330 return self._validate(self._pl[1])
331
331
332 def branch(self):
332 def branch(self):
333 return encoding.tolocal(self._branch)
333 return encoding.tolocal(self._branch)
334
334
335 def setparents(self, p1, p2=nullid):
335 def setparents(self, p1, p2=nullid):
336 """Set dirstate parents to p1 and p2.
336 """Set dirstate parents to p1 and p2.
337
337
338 When moving from two parents to one, 'm' merged entries a
338 When moving from two parents to one, 'm' merged entries a
339 adjusted to normal and previous copy records discarded and
339 adjusted to normal and previous copy records discarded and
340 returned by the call.
340 returned by the call.
341
341
342 See localrepo.setparents()
342 See localrepo.setparents()
343 """
343 """
344 if self._parentwriters == 0:
344 if self._parentwriters == 0:
345 raise ValueError("cannot set dirstate parent without "
345 raise ValueError("cannot set dirstate parent without "
346 "calling dirstate.beginparentchange")
346 "calling dirstate.beginparentchange")
347
347
348 self._dirty = self._dirtypl = True
348 self._dirty = self._dirtypl = True
349 oldp2 = self._pl[1]
349 oldp2 = self._pl[1]
350 self._pl = p1, p2
350 self._pl = p1, p2
351 copies = {}
351 copies = {}
352 if oldp2 != nullid and p2 == nullid:
352 if oldp2 != nullid and p2 == nullid:
353 for f, s in self._map.iteritems():
353 for f, s in self._map.iteritems():
354 # Discard 'm' markers when moving away from a merge state
354 # Discard 'm' markers when moving away from a merge state
355 if s[0] == 'm':
355 if s[0] == 'm':
356 if f in self._copymap:
356 if f in self._copymap:
357 copies[f] = self._copymap[f]
357 copies[f] = self._copymap[f]
358 self.normallookup(f)
358 self.normallookup(f)
359 # Also fix up otherparent markers
359 # Also fix up otherparent markers
360 elif s[0] == 'n' and s[2] == -2:
360 elif s[0] == 'n' and s[2] == -2:
361 if f in self._copymap:
361 if f in self._copymap:
362 copies[f] = self._copymap[f]
362 copies[f] = self._copymap[f]
363 self.add(f)
363 self.add(f)
364 return copies
364 return copies
365
365
366 def setbranch(self, branch):
366 def setbranch(self, branch):
367 self._branch = encoding.fromlocal(branch)
367 self._branch = encoding.fromlocal(branch)
368 f = self._opener('branch', 'w', atomictemp=True)
368 f = self._opener('branch', 'w', atomictemp=True)
369 try:
369 try:
370 f.write(self._branch + '\n')
370 f.write(self._branch + '\n')
371 f.close()
371 f.close()
372
372
373 # make sure filecache has the correct stat info for _branch after
373 # make sure filecache has the correct stat info for _branch after
374 # replacing the underlying file
374 # replacing the underlying file
375 ce = self._filecache['_branch']
375 ce = self._filecache['_branch']
376 if ce:
376 if ce:
377 ce.refresh()
377 ce.refresh()
378 except: # re-raises
378 except: # re-raises
379 f.discard()
379 f.discard()
380 raise
380 raise
381
381
382 def _opendirstatefile(self):
382 def _opendirstatefile(self):
383 fp, mode = _trypending(self._root, self._opener, self._filename)
383 fp, mode = _trypending(self._root, self._opener, self._filename)
384 if self._pendingmode is not None and self._pendingmode != mode:
384 if self._pendingmode is not None and self._pendingmode != mode:
385 fp.close()
385 fp.close()
386 raise error.Abort(_('working directory state may be '
386 raise error.Abort(_('working directory state may be '
387 'changed parallelly'))
387 'changed parallelly'))
388 self._pendingmode = mode
388 self._pendingmode = mode
389 return fp
389 return fp
390
390
391 def _read(self):
391 def _read(self):
392 self._map = {}
392 self._map = {}
393 self._copymap = {}
393 self._copymap = {}
394 try:
394 try:
395 fp = self._opendirstatefile()
395 fp = self._opendirstatefile()
396 try:
396 try:
397 st = fp.read()
397 st = fp.read()
398 finally:
398 finally:
399 fp.close()
399 fp.close()
400 except IOError as err:
400 except IOError as err:
401 if err.errno != errno.ENOENT:
401 if err.errno != errno.ENOENT:
402 raise
402 raise
403 return
403 return
404 if not st:
404 if not st:
405 return
405 return
406
406
407 if util.safehasattr(parsers, 'dict_new_presized'):
407 if util.safehasattr(parsers, 'dict_new_presized'):
408 # Make an estimate of the number of files in the dirstate based on
408 # Make an estimate of the number of files in the dirstate based on
409 # its size. From a linear regression on a set of real-world repos,
409 # its size. From a linear regression on a set of real-world repos,
410 # all over 10,000 files, the size of a dirstate entry is 85
410 # all over 10,000 files, the size of a dirstate entry is 85
411 # bytes. The cost of resizing is significantly higher than the cost
411 # bytes. The cost of resizing is significantly higher than the cost
412 # of filling in a larger presized dict, so subtract 20% from the
412 # of filling in a larger presized dict, so subtract 20% from the
413 # size.
413 # size.
414 #
414 #
415 # This heuristic is imperfect in many ways, so in a future dirstate
415 # This heuristic is imperfect in many ways, so in a future dirstate
416 # format update it makes sense to just record the number of entries
416 # format update it makes sense to just record the number of entries
417 # on write.
417 # on write.
418 self._map = parsers.dict_new_presized(len(st) / 71)
418 self._map = parsers.dict_new_presized(len(st) / 71)
419
419
420 # Python's garbage collector triggers a GC each time a certain number
420 # Python's garbage collector triggers a GC each time a certain number
421 # of container objects (the number being defined by
421 # of container objects (the number being defined by
422 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
422 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
423 # for each file in the dirstate. The C version then immediately marks
423 # for each file in the dirstate. The C version then immediately marks
424 # them as not to be tracked by the collector. However, this has no
424 # them as not to be tracked by the collector. However, this has no
425 # effect on when GCs are triggered, only on what objects the GC looks
425 # effect on when GCs are triggered, only on what objects the GC looks
426 # into. This means that O(number of files) GCs are unavoidable.
426 # into. This means that O(number of files) GCs are unavoidable.
427 # Depending on when in the process's lifetime the dirstate is parsed,
427 # Depending on when in the process's lifetime the dirstate is parsed,
428 # this can get very expensive. As a workaround, disable GC while
428 # this can get very expensive. As a workaround, disable GC while
429 # parsing the dirstate.
429 # parsing the dirstate.
430 #
430 #
431 # (we cannot decorate the function directly since it is in a C module)
431 # (we cannot decorate the function directly since it is in a C module)
432 parse_dirstate = util.nogc(parsers.parse_dirstate)
432 parse_dirstate = util.nogc(parsers.parse_dirstate)
433 p = parse_dirstate(self._map, self._copymap, st)
433 p = parse_dirstate(self._map, self._copymap, st)
434 if not self._dirtypl:
434 if not self._dirtypl:
435 self._pl = p
435 self._pl = p
436
436
437 def invalidate(self):
437 def invalidate(self):
438 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
438 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
439 "_pl", "_dirs", "_ignore", "_nonnormalset"):
439 "_pl", "_dirs", "_ignore", "_nonnormalset"):
440 if a in self.__dict__:
440 if a in self.__dict__:
441 delattr(self, a)
441 delattr(self, a)
442 self._lastnormaltime = 0
442 self._lastnormaltime = 0
443 self._dirty = False
443 self._dirty = False
444 self._parentwriters = 0
444 self._parentwriters = 0
445
445
446 def copy(self, source, dest):
446 def copy(self, source, dest):
447 """Mark dest as a copy of source. Unmark dest if source is None."""
447 """Mark dest as a copy of source. Unmark dest if source is None."""
448 if source == dest:
448 if source == dest:
449 return
449 return
450 self._dirty = True
450 self._dirty = True
451 if source is not None:
451 if source is not None:
452 self._copymap[dest] = source
452 self._copymap[dest] = source
453 elif dest in self._copymap:
453 elif dest in self._copymap:
454 del self._copymap[dest]
454 del self._copymap[dest]
455
455
456 def copied(self, file):
456 def copied(self, file):
457 return self._copymap.get(file, None)
457 return self._copymap.get(file, None)
458
458
459 def copies(self):
459 def copies(self):
460 return self._copymap
460 return self._copymap
461
461
462 def _droppath(self, f):
462 def _droppath(self, f):
463 if self[f] not in "?r" and "_dirs" in self.__dict__:
463 if self[f] not in "?r" and "_dirs" in self.__dict__:
464 self._dirs.delpath(f)
464 self._dirs.delpath(f)
465
465
466 if "_filefoldmap" in self.__dict__:
466 if "_filefoldmap" in self.__dict__:
467 normed = util.normcase(f)
467 normed = util.normcase(f)
468 if normed in self._filefoldmap:
468 if normed in self._filefoldmap:
469 del self._filefoldmap[normed]
469 del self._filefoldmap[normed]
470
470
471 def _addpath(self, f, state, mode, size, mtime):
471 def _addpath(self, f, state, mode, size, mtime):
472 oldstate = self[f]
472 oldstate = self[f]
473 if state == 'a' or oldstate == 'r':
473 if state == 'a' or oldstate == 'r':
474 scmutil.checkfilename(f)
474 scmutil.checkfilename(f)
475 if f in self._dirs:
475 if f in self._dirs:
476 raise error.Abort(_('directory %r already in dirstate') % f)
476 raise error.Abort(_('directory %r already in dirstate') % f)
477 # shadows
477 # shadows
478 for d in util.finddirs(f):
478 for d in util.finddirs(f):
479 if d in self._dirs:
479 if d in self._dirs:
480 break
480 break
481 if d in self._map and self[d] != 'r':
481 if d in self._map and self[d] != 'r':
482 raise error.Abort(
482 raise error.Abort(
483 _('file %r in dirstate clashes with %r') % (d, f))
483 _('file %r in dirstate clashes with %r') % (d, f))
484 if oldstate in "?r" and "_dirs" in self.__dict__:
484 if oldstate in "?r" and "_dirs" in self.__dict__:
485 self._dirs.addpath(f)
485 self._dirs.addpath(f)
486 self._dirty = True
486 self._dirty = True
487 self._map[f] = dirstatetuple(state, mode, size, mtime)
487 self._map[f] = dirstatetuple(state, mode, size, mtime)
488 if state != 'n' or mtime == -1:
488 if state != 'n' or mtime == -1:
489 self._nonnormalset.add(f)
489 self._nonnormalset.add(f)
490
490
491 def normal(self, f):
491 def normal(self, f):
492 '''Mark a file normal and clean.'''
492 '''Mark a file normal and clean.'''
493 s = os.lstat(self._join(f))
493 s = os.lstat(self._join(f))
494 mtime = s.st_mtime
494 mtime = s.st_mtime
495 self._addpath(f, 'n', s.st_mode,
495 self._addpath(f, 'n', s.st_mode,
496 s.st_size & _rangemask, mtime & _rangemask)
496 s.st_size & _rangemask, mtime & _rangemask)
497 if f in self._copymap:
497 if f in self._copymap:
498 del self._copymap[f]
498 del self._copymap[f]
499 if f in self._nonnormalset:
499 if f in self._nonnormalset:
500 self._nonnormalset.remove(f)
500 self._nonnormalset.remove(f)
501 if mtime > self._lastnormaltime:
501 if mtime > self._lastnormaltime:
502 # Remember the most recent modification timeslot for status(),
502 # Remember the most recent modification timeslot for status(),
503 # to make sure we won't miss future size-preserving file content
503 # to make sure we won't miss future size-preserving file content
504 # modifications that happen within the same timeslot.
504 # modifications that happen within the same timeslot.
505 self._lastnormaltime = mtime
505 self._lastnormaltime = mtime
506
506
507 def normallookup(self, f):
507 def normallookup(self, f):
508 '''Mark a file normal, but possibly dirty.'''
508 '''Mark a file normal, but possibly dirty.'''
509 if self._pl[1] != nullid and f in self._map:
509 if self._pl[1] != nullid and f in self._map:
510 # if there is a merge going on and the file was either
510 # if there is a merge going on and the file was either
511 # in state 'm' (-1) or coming from other parent (-2) before
511 # in state 'm' (-1) or coming from other parent (-2) before
512 # being removed, restore that state.
512 # being removed, restore that state.
513 entry = self._map[f]
513 entry = self._map[f]
514 if entry[0] == 'r' and entry[2] in (-1, -2):
514 if entry[0] == 'r' and entry[2] in (-1, -2):
515 source = self._copymap.get(f)
515 source = self._copymap.get(f)
516 if entry[2] == -1:
516 if entry[2] == -1:
517 self.merge(f)
517 self.merge(f)
518 elif entry[2] == -2:
518 elif entry[2] == -2:
519 self.otherparent(f)
519 self.otherparent(f)
520 if source:
520 if source:
521 self.copy(source, f)
521 self.copy(source, f)
522 return
522 return
523 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
523 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
524 return
524 return
525 self._addpath(f, 'n', 0, -1, -1)
525 self._addpath(f, 'n', 0, -1, -1)
526 if f in self._copymap:
526 if f in self._copymap:
527 del self._copymap[f]
527 del self._copymap[f]
528 if f in self._nonnormalset:
528 if f in self._nonnormalset:
529 self._nonnormalset.remove(f)
529 self._nonnormalset.remove(f)
530
530
531 def otherparent(self, f):
531 def otherparent(self, f):
532 '''Mark as coming from the other parent, always dirty.'''
532 '''Mark as coming from the other parent, always dirty.'''
533 if self._pl[1] == nullid:
533 if self._pl[1] == nullid:
534 raise error.Abort(_("setting %r to other parent "
534 raise error.Abort(_("setting %r to other parent "
535 "only allowed in merges") % f)
535 "only allowed in merges") % f)
536 if f in self and self[f] == 'n':
536 if f in self and self[f] == 'n':
537 # merge-like
537 # merge-like
538 self._addpath(f, 'm', 0, -2, -1)
538 self._addpath(f, 'm', 0, -2, -1)
539 else:
539 else:
540 # add-like
540 # add-like
541 self._addpath(f, 'n', 0, -2, -1)
541 self._addpath(f, 'n', 0, -2, -1)
542
542
543 if f in self._copymap:
543 if f in self._copymap:
544 del self._copymap[f]
544 del self._copymap[f]
545
545
546 def add(self, f):
546 def add(self, f):
547 '''Mark a file added.'''
547 '''Mark a file added.'''
548 self._addpath(f, 'a', 0, -1, -1)
548 self._addpath(f, 'a', 0, -1, -1)
549 if f in self._copymap:
549 if f in self._copymap:
550 del self._copymap[f]
550 del self._copymap[f]
551
551
552 def remove(self, f):
552 def remove(self, f):
553 '''Mark a file removed.'''
553 '''Mark a file removed.'''
554 self._dirty = True
554 self._dirty = True
555 self._droppath(f)
555 self._droppath(f)
556 size = 0
556 size = 0
557 if self._pl[1] != nullid and f in self._map:
557 if self._pl[1] != nullid and f in self._map:
558 # backup the previous state
558 # backup the previous state
559 entry = self._map[f]
559 entry = self._map[f]
560 if entry[0] == 'm': # merge
560 if entry[0] == 'm': # merge
561 size = -1
561 size = -1
562 elif entry[0] == 'n' and entry[2] == -2: # other parent
562 elif entry[0] == 'n' and entry[2] == -2: # other parent
563 size = -2
563 size = -2
564 self._map[f] = dirstatetuple('r', 0, size, 0)
564 self._map[f] = dirstatetuple('r', 0, size, 0)
565 self._nonnormalset.add(f)
565 self._nonnormalset.add(f)
566 if size == 0 and f in self._copymap:
566 if size == 0 and f in self._copymap:
567 del self._copymap[f]
567 del self._copymap[f]
568
568
569 def merge(self, f):
569 def merge(self, f):
570 '''Mark a file merged.'''
570 '''Mark a file merged.'''
571 if self._pl[1] == nullid:
571 if self._pl[1] == nullid:
572 return self.normallookup(f)
572 return self.normallookup(f)
573 return self.otherparent(f)
573 return self.otherparent(f)
574
574
575 def drop(self, f):
575 def drop(self, f):
576 '''Drop a file from the dirstate'''
576 '''Drop a file from the dirstate'''
577 if f in self._map:
577 if f in self._map:
578 self._dirty = True
578 self._dirty = True
579 self._droppath(f)
579 self._droppath(f)
580 del self._map[f]
580 del self._map[f]
581 if f in self._nonnormalset:
581 if f in self._nonnormalset:
582 self._nonnormalset.remove(f)
582 self._nonnormalset.remove(f)
583
583
584 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
584 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
585 if exists is None:
585 if exists is None:
586 exists = os.path.lexists(os.path.join(self._root, path))
586 exists = os.path.lexists(os.path.join(self._root, path))
587 if not exists:
587 if not exists:
588 # Maybe a path component exists
588 # Maybe a path component exists
589 if not ignoremissing and '/' in path:
589 if not ignoremissing and '/' in path:
590 d, f = path.rsplit('/', 1)
590 d, f = path.rsplit('/', 1)
591 d = self._normalize(d, False, ignoremissing, None)
591 d = self._normalize(d, False, ignoremissing, None)
592 folded = d + "/" + f
592 folded = d + "/" + f
593 else:
593 else:
594 # No path components, preserve original case
594 # No path components, preserve original case
595 folded = path
595 folded = path
596 else:
596 else:
597 # recursively normalize leading directory components
597 # recursively normalize leading directory components
598 # against dirstate
598 # against dirstate
599 if '/' in normed:
599 if '/' in normed:
600 d, f = normed.rsplit('/', 1)
600 d, f = normed.rsplit('/', 1)
601 d = self._normalize(d, False, ignoremissing, True)
601 d = self._normalize(d, False, ignoremissing, True)
602 r = self._root + "/" + d
602 r = self._root + "/" + d
603 folded = d + "/" + util.fspath(f, r)
603 folded = d + "/" + util.fspath(f, r)
604 else:
604 else:
605 folded = util.fspath(normed, self._root)
605 folded = util.fspath(normed, self._root)
606 storemap[normed] = folded
606 storemap[normed] = folded
607
607
608 return folded
608 return folded
609
609
610 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
610 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
611 normed = util.normcase(path)
611 normed = util.normcase(path)
612 folded = self._filefoldmap.get(normed, None)
612 folded = self._filefoldmap.get(normed, None)
613 if folded is None:
613 if folded is None:
614 if isknown:
614 if isknown:
615 folded = path
615 folded = path
616 else:
616 else:
617 folded = self._discoverpath(path, normed, ignoremissing, exists,
617 folded = self._discoverpath(path, normed, ignoremissing, exists,
618 self._filefoldmap)
618 self._filefoldmap)
619 return folded
619 return folded
620
620
621 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
621 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
622 normed = util.normcase(path)
622 normed = util.normcase(path)
623 folded = self._filefoldmap.get(normed, None)
623 folded = self._filefoldmap.get(normed, None)
624 if folded is None:
624 if folded is None:
625 folded = self._dirfoldmap.get(normed, None)
625 folded = self._dirfoldmap.get(normed, None)
626 if folded is None:
626 if folded is None:
627 if isknown:
627 if isknown:
628 folded = path
628 folded = path
629 else:
629 else:
630 # store discovered result in dirfoldmap so that future
630 # store discovered result in dirfoldmap so that future
631 # normalizefile calls don't start matching directories
631 # normalizefile calls don't start matching directories
632 folded = self._discoverpath(path, normed, ignoremissing, exists,
632 folded = self._discoverpath(path, normed, ignoremissing, exists,
633 self._dirfoldmap)
633 self._dirfoldmap)
634 return folded
634 return folded
635
635
636 def normalize(self, path, isknown=False, ignoremissing=False):
636 def normalize(self, path, isknown=False, ignoremissing=False):
637 '''
637 '''
638 normalize the case of a pathname when on a casefolding filesystem
638 normalize the case of a pathname when on a casefolding filesystem
639
639
640 isknown specifies whether the filename came from walking the
640 isknown specifies whether the filename came from walking the
641 disk, to avoid extra filesystem access.
641 disk, to avoid extra filesystem access.
642
642
643 If ignoremissing is True, missing path are returned
643 If ignoremissing is True, missing path are returned
644 unchanged. Otherwise, we try harder to normalize possibly
644 unchanged. Otherwise, we try harder to normalize possibly
645 existing path components.
645 existing path components.
646
646
647 The normalized case is determined based on the following precedence:
647 The normalized case is determined based on the following precedence:
648
648
649 - version of name already stored in the dirstate
649 - version of name already stored in the dirstate
650 - version of name stored on disk
650 - version of name stored on disk
651 - version provided via command arguments
651 - version provided via command arguments
652 '''
652 '''
653
653
654 if self._checkcase:
654 if self._checkcase:
655 return self._normalize(path, isknown, ignoremissing)
655 return self._normalize(path, isknown, ignoremissing)
656 return path
656 return path
657
657
658 def clear(self):
658 def clear(self):
659 self._map = {}
659 self._map = {}
660 self._nonnormalset = set()
660 self._nonnormalset = set()
661 if "_dirs" in self.__dict__:
661 if "_dirs" in self.__dict__:
662 delattr(self, "_dirs")
662 delattr(self, "_dirs")
663 self._copymap = {}
663 self._copymap = {}
664 self._pl = [nullid, nullid]
664 self._pl = [nullid, nullid]
665 self._lastnormaltime = 0
665 self._lastnormaltime = 0
666 self._dirty = True
666 self._dirty = True
667
667
668 def rebuild(self, parent, allfiles, changedfiles=None):
668 def rebuild(self, parent, allfiles, changedfiles=None):
669 if changedfiles is None:
669 if changedfiles is None:
670 # Rebuild entire dirstate
670 # Rebuild entire dirstate
671 changedfiles = allfiles
671 changedfiles = allfiles
672 lastnormaltime = self._lastnormaltime
672 lastnormaltime = self._lastnormaltime
673 self.clear()
673 self.clear()
674 self._lastnormaltime = lastnormaltime
674 self._lastnormaltime = lastnormaltime
675
675
676 for f in changedfiles:
676 for f in changedfiles:
677 mode = 0o666
677 mode = 0o666
678 if f in allfiles and 'x' in allfiles.flags(f):
678 if f in allfiles and 'x' in allfiles.flags(f):
679 mode = 0o777
679 mode = 0o777
680
680
681 if f in allfiles:
681 if f in allfiles:
682 self._map[f] = dirstatetuple('n', mode, -1, 0)
682 self._map[f] = dirstatetuple('n', mode, -1, 0)
683 else:
683 else:
684 self._map.pop(f, None)
684 self._map.pop(f, None)
685 if f in self._nonnormalset:
685 if f in self._nonnormalset:
686 self._nonnormalset.remove(f)
686 self._nonnormalset.remove(f)
687
687
688 self._pl = (parent, nullid)
688 self._pl = (parent, nullid)
689 self._dirty = True
689 self._dirty = True
690
690
691 def write(self, tr=False):
691 def write(self, tr=False):
692 if not self._dirty:
692 if not self._dirty:
693 return
693 return
694
694
695 filename = self._filename
695 filename = self._filename
696 if tr is False: # not explicitly specified
696 if tr is False: # not explicitly specified
697 - if (self._ui.configbool('devel', 'all-warnings')
698 - or self._ui.configbool('devel', 'check-dirstate-write')):
699 - self._ui.develwarn('use dirstate.write with '
700 - 'repo.currenttransaction()')
697 + self._ui.develwarn('use dirstate.write with '
698 + 'repo.currenttransaction()',
699 + config='check-dirstate-write')
701
700
702 if self._opener.lexists(self._pendingfilename):
701 if self._opener.lexists(self._pendingfilename):
703 # if pending file already exists, in-memory changes
702 # if pending file already exists, in-memory changes
704 # should be written into it, because it has priority
703 # should be written into it, because it has priority
705 # to '.hg/dirstate' at reading under HG_PENDING mode
704 # to '.hg/dirstate' at reading under HG_PENDING mode
706 filename = self._pendingfilename
705 filename = self._pendingfilename
707 elif tr:
706 elif tr:
708 # 'dirstate.write()' is not only for writing in-memory
707 # 'dirstate.write()' is not only for writing in-memory
709 # changes out, but also for dropping ambiguous timestamp.
708 # changes out, but also for dropping ambiguous timestamp.
710 # delayed writing re-raise "ambiguous timestamp issue".
709 # delayed writing re-raise "ambiguous timestamp issue".
711 # See also the wiki page below for detail:
710 # See also the wiki page below for detail:
712 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
711 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
713
712
714 # emulate dropping timestamp in 'parsers.pack_dirstate'
713 # emulate dropping timestamp in 'parsers.pack_dirstate'
715 now = _getfsnow(self._opener)
714 now = _getfsnow(self._opener)
716 dmap = self._map
715 dmap = self._map
717 for f, e in dmap.iteritems():
716 for f, e in dmap.iteritems():
718 if e[0] == 'n' and e[3] == now:
717 if e[0] == 'n' and e[3] == now:
719 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
718 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
720 self._nonnormalset.add(f)
719 self._nonnormalset.add(f)
721
720
722 # emulate that all 'dirstate.normal' results are written out
721 # emulate that all 'dirstate.normal' results are written out
723 self._lastnormaltime = 0
722 self._lastnormaltime = 0
724
723
725 # delay writing in-memory changes out
724 # delay writing in-memory changes out
726 tr.addfilegenerator('dirstate', (self._filename,),
725 tr.addfilegenerator('dirstate', (self._filename,),
727 self._writedirstate, location='plain')
726 self._writedirstate, location='plain')
728 return
727 return
729
728
730 st = self._opener(filename, "w", atomictemp=True)
729 st = self._opener(filename, "w", atomictemp=True)
731 self._writedirstate(st)
730 self._writedirstate(st)
732
731
733 def _writedirstate(self, st):
732 def _writedirstate(self, st):
734 # use the modification time of the newly created temporary file as the
733 # use the modification time of the newly created temporary file as the
735 # filesystem's notion of 'now'
734 # filesystem's notion of 'now'
736 now = util.fstat(st).st_mtime & _rangemask
735 now = util.fstat(st).st_mtime & _rangemask
737
736
738 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
737 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
739 # timestamp of each entries in dirstate, because of 'now > mtime'
738 # timestamp of each entries in dirstate, because of 'now > mtime'
740 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
739 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
741 if delaywrite > 0:
740 if delaywrite > 0:
742 # do we have any files to delay for?
741 # do we have any files to delay for?
743 for f, e in self._map.iteritems():
742 for f, e in self._map.iteritems():
744 if e[0] == 'n' and e[3] == now:
743 if e[0] == 'n' and e[3] == now:
745 import time # to avoid useless import
744 import time # to avoid useless import
746 # rather than sleep n seconds, sleep until the next
745 # rather than sleep n seconds, sleep until the next
747 # multiple of n seconds
746 # multiple of n seconds
748 clock = time.time()
747 clock = time.time()
749 start = int(clock) - (int(clock) % delaywrite)
748 start = int(clock) - (int(clock) % delaywrite)
750 end = start + delaywrite
749 end = start + delaywrite
751 time.sleep(end - clock)
750 time.sleep(end - clock)
752 break
751 break
753
752
754 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
753 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
755 self._nonnormalset = nonnormalentries(self._map)
754 self._nonnormalset = nonnormalentries(self._map)
756 st.close()
755 st.close()
757 self._lastnormaltime = 0
756 self._lastnormaltime = 0
758 self._dirty = self._dirtypl = False
757 self._dirty = self._dirtypl = False
759
758
760 def _dirignore(self, f):
759 def _dirignore(self, f):
761 if f == '.':
760 if f == '.':
762 return False
761 return False
763 if self._ignore(f):
762 if self._ignore(f):
764 return True
763 return True
765 for p in util.finddirs(f):
764 for p in util.finddirs(f):
766 if self._ignore(p):
765 if self._ignore(p):
767 return True
766 return True
768 return False
767 return False
769
768
770 def _ignorefiles(self):
769 def _ignorefiles(self):
771 files = []
770 files = []
772 if os.path.exists(self._join('.hgignore')):
771 if os.path.exists(self._join('.hgignore')):
773 files.append(self._join('.hgignore'))
772 files.append(self._join('.hgignore'))
774 for name, path in self._ui.configitems("ui"):
773 for name, path in self._ui.configitems("ui"):
775 if name == 'ignore' or name.startswith('ignore.'):
774 if name == 'ignore' or name.startswith('ignore.'):
776 # we need to use os.path.join here rather than self._join
775 # we need to use os.path.join here rather than self._join
777 # because path is arbitrary and user-specified
776 # because path is arbitrary and user-specified
778 files.append(os.path.join(self._rootdir, util.expandpath(path)))
777 files.append(os.path.join(self._rootdir, util.expandpath(path)))
779 return files
778 return files
780
779
781 def _ignorefileandline(self, f):
780 def _ignorefileandline(self, f):
782 files = collections.deque(self._ignorefiles())
781 files = collections.deque(self._ignorefiles())
783 visited = set()
782 visited = set()
784 while files:
783 while files:
785 i = files.popleft()
784 i = files.popleft()
786 patterns = matchmod.readpatternfile(i, self._ui.warn,
785 patterns = matchmod.readpatternfile(i, self._ui.warn,
787 sourceinfo=True)
786 sourceinfo=True)
788 for pattern, lineno, line in patterns:
787 for pattern, lineno, line in patterns:
789 kind, p = matchmod._patsplit(pattern, 'glob')
788 kind, p = matchmod._patsplit(pattern, 'glob')
790 if kind == "subinclude":
789 if kind == "subinclude":
791 if p not in visited:
790 if p not in visited:
792 files.append(p)
791 files.append(p)
793 continue
792 continue
794 m = matchmod.match(self._root, '', [], [pattern],
793 m = matchmod.match(self._root, '', [], [pattern],
795 warn=self._ui.warn)
794 warn=self._ui.warn)
796 if m(f):
795 if m(f):
797 return (i, lineno, line)
796 return (i, lineno, line)
798 visited.add(i)
797 visited.add(i)
799 return (None, -1, "")
798 return (None, -1, "")
800
799
801 def _walkexplicit(self, match, subrepos):
800 def _walkexplicit(self, match, subrepos):
802 '''Get stat data about the files explicitly specified by match.
801 '''Get stat data about the files explicitly specified by match.
803
802
804 Return a triple (results, dirsfound, dirsnotfound).
803 Return a triple (results, dirsfound, dirsnotfound).
805 - results is a mapping from filename to stat result. It also contains
804 - results is a mapping from filename to stat result. It also contains
806 listings mapping subrepos and .hg to None.
805 listings mapping subrepos and .hg to None.
807 - dirsfound is a list of files found to be directories.
806 - dirsfound is a list of files found to be directories.
808 - dirsnotfound is a list of files that the dirstate thinks are
807 - dirsnotfound is a list of files that the dirstate thinks are
809 directories and that were not found.'''
808 directories and that were not found.'''
810
809
811 def badtype(mode):
810 def badtype(mode):
812 kind = _('unknown')
811 kind = _('unknown')
813 if stat.S_ISCHR(mode):
812 if stat.S_ISCHR(mode):
814 kind = _('character device')
813 kind = _('character device')
815 elif stat.S_ISBLK(mode):
814 elif stat.S_ISBLK(mode):
816 kind = _('block device')
815 kind = _('block device')
817 elif stat.S_ISFIFO(mode):
816 elif stat.S_ISFIFO(mode):
818 kind = _('fifo')
817 kind = _('fifo')
819 elif stat.S_ISSOCK(mode):
818 elif stat.S_ISSOCK(mode):
820 kind = _('socket')
819 kind = _('socket')
821 elif stat.S_ISDIR(mode):
820 elif stat.S_ISDIR(mode):
822 kind = _('directory')
821 kind = _('directory')
823 return _('unsupported file type (type is %s)') % kind
822 return _('unsupported file type (type is %s)') % kind
824
823
825 matchedir = match.explicitdir
824 matchedir = match.explicitdir
826 badfn = match.bad
825 badfn = match.bad
827 dmap = self._map
826 dmap = self._map
828 lstat = os.lstat
827 lstat = os.lstat
829 getkind = stat.S_IFMT
828 getkind = stat.S_IFMT
830 dirkind = stat.S_IFDIR
829 dirkind = stat.S_IFDIR
831 regkind = stat.S_IFREG
830 regkind = stat.S_IFREG
832 lnkkind = stat.S_IFLNK
831 lnkkind = stat.S_IFLNK
833 join = self._join
832 join = self._join
834 dirsfound = []
833 dirsfound = []
835 foundadd = dirsfound.append
834 foundadd = dirsfound.append
836 dirsnotfound = []
835 dirsnotfound = []
837 notfoundadd = dirsnotfound.append
836 notfoundadd = dirsnotfound.append
838
837
839 if not match.isexact() and self._checkcase:
838 if not match.isexact() and self._checkcase:
840 normalize = self._normalize
839 normalize = self._normalize
841 else:
840 else:
842 normalize = None
841 normalize = None
843
842
844 files = sorted(match.files())
843 files = sorted(match.files())
845 subrepos.sort()
844 subrepos.sort()
846 i, j = 0, 0
845 i, j = 0, 0
847 while i < len(files) and j < len(subrepos):
846 while i < len(files) and j < len(subrepos):
848 subpath = subrepos[j] + "/"
847 subpath = subrepos[j] + "/"
849 if files[i] < subpath:
848 if files[i] < subpath:
850 i += 1
849 i += 1
851 continue
850 continue
852 while i < len(files) and files[i].startswith(subpath):
851 while i < len(files) and files[i].startswith(subpath):
853 del files[i]
852 del files[i]
854 j += 1
853 j += 1
855
854
856 if not files or '.' in files:
855 if not files or '.' in files:
857 files = ['.']
856 files = ['.']
858 results = dict.fromkeys(subrepos)
857 results = dict.fromkeys(subrepos)
859 results['.hg'] = None
858 results['.hg'] = None
860
859
861 alldirs = None
860 alldirs = None
862 for ff in files:
861 for ff in files:
863 # constructing the foldmap is expensive, so don't do it for the
862 # constructing the foldmap is expensive, so don't do it for the
864 # common case where files is ['.']
863 # common case where files is ['.']
865 if normalize and ff != '.':
864 if normalize and ff != '.':
866 nf = normalize(ff, False, True)
865 nf = normalize(ff, False, True)
867 else:
866 else:
868 nf = ff
867 nf = ff
869 if nf in results:
868 if nf in results:
870 continue
869 continue
871
870
872 try:
871 try:
873 st = lstat(join(nf))
872 st = lstat(join(nf))
874 kind = getkind(st.st_mode)
873 kind = getkind(st.st_mode)
875 if kind == dirkind:
874 if kind == dirkind:
876 if nf in dmap:
875 if nf in dmap:
877 # file replaced by dir on disk but still in dirstate
876 # file replaced by dir on disk but still in dirstate
878 results[nf] = None
877 results[nf] = None
879 if matchedir:
878 if matchedir:
880 matchedir(nf)
879 matchedir(nf)
881 foundadd((nf, ff))
880 foundadd((nf, ff))
882 elif kind == regkind or kind == lnkkind:
881 elif kind == regkind or kind == lnkkind:
883 results[nf] = st
882 results[nf] = st
884 else:
883 else:
885 badfn(ff, badtype(kind))
884 badfn(ff, badtype(kind))
886 if nf in dmap:
885 if nf in dmap:
887 results[nf] = None
886 results[nf] = None
888 except OSError as inst: # nf not found on disk - it is dirstate only
887 except OSError as inst: # nf not found on disk - it is dirstate only
889 if nf in dmap: # does it exactly match a missing file?
888 if nf in dmap: # does it exactly match a missing file?
890 results[nf] = None
889 results[nf] = None
891 else: # does it match a missing directory?
890 else: # does it match a missing directory?
892 if alldirs is None:
891 if alldirs is None:
893 alldirs = util.dirs(dmap)
892 alldirs = util.dirs(dmap)
894 if nf in alldirs:
893 if nf in alldirs:
895 if matchedir:
894 if matchedir:
896 matchedir(nf)
895 matchedir(nf)
897 notfoundadd(nf)
896 notfoundadd(nf)
898 else:
897 else:
899 badfn(ff, inst.strerror)
898 badfn(ff, inst.strerror)
900
899
901 # Case insensitive filesystems cannot rely on lstat() failing to detect
900 # Case insensitive filesystems cannot rely on lstat() failing to detect
902 # a case-only rename. Prune the stat object for any file that does not
901 # a case-only rename. Prune the stat object for any file that does not
903 # match the case in the filesystem, if there are multiple files that
902 # match the case in the filesystem, if there are multiple files that
904 # normalize to the same path.
903 # normalize to the same path.
905 if match.isexact() and self._checkcase:
904 if match.isexact() and self._checkcase:
906 normed = {}
905 normed = {}
907
906
908 for f, st in results.iteritems():
907 for f, st in results.iteritems():
909 if st is None:
908 if st is None:
910 continue
909 continue
911
910
912 nc = util.normcase(f)
911 nc = util.normcase(f)
913 paths = normed.get(nc)
912 paths = normed.get(nc)
914
913
915 if paths is None:
914 if paths is None:
916 paths = set()
915 paths = set()
917 normed[nc] = paths
916 normed[nc] = paths
918
917
919 paths.add(f)
918 paths.add(f)
920
919
921 for norm, paths in normed.iteritems():
920 for norm, paths in normed.iteritems():
922 if len(paths) > 1:
921 if len(paths) > 1:
923 for path in paths:
922 for path in paths:
924 folded = self._discoverpath(path, norm, True, None,
923 folded = self._discoverpath(path, norm, True, None,
925 self._dirfoldmap)
924 self._dirfoldmap)
926 if path != folded:
925 if path != folded:
927 results[path] = None
926 results[path] = None
928
927
929 return results, dirsfound, dirsnotfound
928 return results, dirsfound, dirsnotfound
930
929
931 def walk(self, match, subrepos, unknown, ignored, full=True):
930 def walk(self, match, subrepos, unknown, ignored, full=True):
932 '''
931 '''
933 Walk recursively through the directory tree, finding all files
932 Walk recursively through the directory tree, finding all files
934 matched by match.
933 matched by match.
935
934
936 If full is False, maybe skip some known-clean files.
935 If full is False, maybe skip some known-clean files.
937
936
938 Return a dict mapping filename to stat-like object (either
937 Return a dict mapping filename to stat-like object (either
939 mercurial.osutil.stat instance or return value of os.stat()).
938 mercurial.osutil.stat instance or return value of os.stat()).
940
939
941 '''
940 '''
942 # full is a flag that extensions that hook into walk can use -- this
941 # full is a flag that extensions that hook into walk can use -- this
943 # implementation doesn't use it at all. This satisfies the contract
942 # implementation doesn't use it at all. This satisfies the contract
944 # because we only guarantee a "maybe".
943 # because we only guarantee a "maybe".
945
944
946 if ignored:
945 if ignored:
947 ignore = util.never
946 ignore = util.never
948 dirignore = util.never
947 dirignore = util.never
949 elif unknown:
948 elif unknown:
950 ignore = self._ignore
949 ignore = self._ignore
951 dirignore = self._dirignore
950 dirignore = self._dirignore
952 else:
951 else:
953 # if not unknown and not ignored, drop dir recursion and step 2
952 # if not unknown and not ignored, drop dir recursion and step 2
954 ignore = util.always
953 ignore = util.always
955 dirignore = util.always
954 dirignore = util.always
956
955
957 matchfn = match.matchfn
956 matchfn = match.matchfn
958 matchalways = match.always()
957 matchalways = match.always()
959 matchtdir = match.traversedir
958 matchtdir = match.traversedir
960 dmap = self._map
959 dmap = self._map
961 listdir = osutil.listdir
960 listdir = osutil.listdir
962 lstat = os.lstat
961 lstat = os.lstat
963 dirkind = stat.S_IFDIR
962 dirkind = stat.S_IFDIR
964 regkind = stat.S_IFREG
963 regkind = stat.S_IFREG
965 lnkkind = stat.S_IFLNK
964 lnkkind = stat.S_IFLNK
966 join = self._join
965 join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]
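        # At this point 'results' maps already-examined names to a stat result
        # (or None), 'dirsnotfound' lists explicitly requested directories
        # that are missing, and -- judging from the unpacking further down --
        # each 'work' entry appears to be a (normalized, as-given) directory
        # pair.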

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                nd = work.pop()
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd), inst.strerror)
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)
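        # traverse() uses an explicit work stack (pop/append) instead of
        # recursion, so deeply nested trees cannot hit Python's recursion
        # limit; 'alreadynormed' lets it skip per-entry case normalization
        # when the starting directory is already spelled as stored on disk.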

        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = dmap.keys()
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was
                # stat'ed and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                nf = iter(visit).next
                for st in util.statfiles([join(i) for i in visit]):
                    results[nf()] = st
        return results
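    # A note on step 3's fallback branch above: util.statfiles() stats the
    # missed paths in one batch and yields a stat result per path, with None
    # for paths that do not exist, so missing tracked files still land in
    # 'results' (as None) and can later be reported as deleted by status().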

    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
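        # Hypothetical caller sketch -- 'repo' is an assumption for
        # illustration, not defined in this module:
        #
        #     unsure, s = repo.dirstate.status(matchmod.always(repo.root, ''),
        #                                      [], ignored=False, clean=False,
        #                                      unknown=True)
        #     # s.modified, s.added, s.removed, s.deleted, s.unknown, ...
        #
        # Files in 'unsure' need a content comparison against the parent
        # revision before they can be classified as clean or modified.
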
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        ladd = lookup.append # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]
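            # The four dirstatetuple fields are the entry's state character
            # ('n' normal, 'a' added, 'r' removed, 'm' merged), the recorded
            # mode, the recorded size and the recorded mtime; special size
            # values (such as -2 for "other parent") and the _rangemask
            # masking are handled by the checks below.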

            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)
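            # The '& _rangemask' comparisons above exist because sizes and
            # mtimes are stored in the dirstate truncated to 31 bits
            # (_rangemask is 0x7fffffff); also comparing the masked value
            # keeps, for example, files larger than 2 GiB from being flagged
            # as modified on every run.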

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))

    def matches(self, match):
        '''
        return files in the dirstate (in whatever state) filtered by match
        '''
        dmap = self._map
        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files is
            # much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just return
            # that
            return list(files)
        return [f for f in dmap if match(f)]
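    # Hypothetical example for matches(): with an exact matcher such as
    # matchmod.exact(repo.root, '', ['setup.py', 'README']) it returns only
    # the subset of those names that the dirstate actually tracks, whatever
    # their state.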

    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def _savebackup(self, tr, suffix):
        '''Save current dirstate into backup file with suffix'''
        filename = self._actualfilename(tr)

        # use '_writedirstate' instead of 'write' to make sure changes are
        # actually written out, because the latter skips writing while a
        # transaction is running. The file written here will be used to
        # create the backup of the dirstate at this point.
        self._writedirstate(self._opener(filename, "w", atomictemp=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        self._opener.write(filename + suffix, self._opener.tryread(filename))
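    # _savebackup, _restorebackup and _clearbackup are presumably used as a
    # trio: a caller snapshots the dirstate under 'filename + suffix' before a
    # risky working-directory operation, then either renames the snapshot back
    # on failure or unlinks it once the operation has succeeded.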

    def _restorebackup(self, tr, suffix):
        '''Restore dirstate from backup file with suffix'''
        # this invalidate() prevents wlock.release() from writing out dirstate
        # changes after the backup file has been restored
        self.invalidate()
        filename = self._actualfilename(tr)
        self._opener.rename(filename + suffix, filename)

    def _clearbackup(self, tr, suffix):
        '''Clear backup file with suffix'''
        filename = self._actualfilename(tr)
        self._opener.unlink(filename + suffix)