##// END OF EJS Templates
dirstate: extract logic to compute the list of ignorefiles...
Laurent Charignon -
r27594:0921caca default
parent child Browse files
Show More
@@ -1,1219 +1,1222 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import stat
12 import stat
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import nullid
15 from .node import nullid
16 from . import (
16 from . import (
17 encoding,
17 encoding,
18 error,
18 error,
19 match as matchmod,
19 match as matchmod,
20 osutil,
20 osutil,
21 parsers,
21 parsers,
22 pathutil,
22 pathutil,
23 scmutil,
23 scmutil,
24 util,
24 util,
25 )
25 )
26
26
27 propertycache = util.propertycache
27 propertycache = util.propertycache
28 filecache = scmutil.filecache
28 filecache = scmutil.filecache
29 _rangemask = 0x7fffffff
29 _rangemask = 0x7fffffff
30
30
31 dirstatetuple = parsers.dirstatetuple
31 dirstatetuple = parsers.dirstatetuple
32
32
class repocache(filecache):
    """filecache descriptor for files living under '.hg/'."""
    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg directory
        return obj._opener.join(fname)
37
37
class rootcache(filecache):
    """filecache descriptor for files living in the repository root."""
    def join(self, obj, fname):
        # resolve fname relative to the working directory root
        return obj._join(fname)
42
42
43 def _getfsnow(vfs):
43 def _getfsnow(vfs):
44 '''Get "now" timestamp on filesystem'''
44 '''Get "now" timestamp on filesystem'''
45 tmpfd, tmpname = vfs.mkstemp()
45 tmpfd, tmpname = vfs.mkstemp()
46 try:
46 try:
47 return os.fstat(tmpfd).st_mtime
47 return os.fstat(tmpfd).st_mtime
48 finally:
48 finally:
49 os.close(tmpfd)
49 os.close(tmpfd)
50 vfs.unlink(tmpname)
50 vfs.unlink(tmpname)
51
51
def nonnormalentries(dmap):
    '''Compute the nonnormal dirstate entries from the dmap'''
    try:
        # prefer the C implementation when the parsers module provides one
        return parsers.nonnormalentries(dmap)
    except AttributeError:
        # pure-Python fallback: an entry is nonnormal when its state is
        # not 'n' (normal) or its mtime is unset (-1)
        nonnormal = set()
        for fname, e in dmap.iteritems():
            if e[0] != 'n' or e[3] == -1:
                nonnormal.add(fname)
        return nonnormal
59
59
60 def _trypending(root, vfs, filename):
60 def _trypending(root, vfs, filename):
61 '''Open file to be read according to HG_PENDING environment variable
61 '''Open file to be read according to HG_PENDING environment variable
62
62
63 This opens '.pending' of specified 'filename' only when HG_PENDING
63 This opens '.pending' of specified 'filename' only when HG_PENDING
64 is equal to 'root'.
64 is equal to 'root'.
65
65
66 This returns '(fp, is_pending_opened)' tuple.
66 This returns '(fp, is_pending_opened)' tuple.
67 '''
67 '''
68 if root == os.environ.get('HG_PENDING'):
68 if root == os.environ.get('HG_PENDING'):
69 try:
69 try:
70 return (vfs('%s.pending' % filename), True)
70 return (vfs('%s.pending' % filename), True)
71 except IOError as inst:
71 except IOError as inst:
72 if inst.errno != errno.ENOENT:
72 if inst.errno != errno.ENOENT:
73 raise
73 raise
74 return (vfs(filename), False)
74 return (vfs(filename), False)
75
75
76 class dirstate(object):
76 class dirstate(object):
77
77
    def __init__(self, opener, ui, root, validate):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        # callable used to validate/convert parent nodes on read
        self._validate = validate
        self._root = root
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # internal config: ui.forcecwd
        forcecwd = ui.config('ui', 'forcecwd')
        if forcecwd:
            # pre-populates the _cwd propertycache, overriding os.getcwd()
            self._cwd = forcecwd
        self._dirty = False
        self._dirtypl = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # depth of nested beginparentchange() calls currently open
        self._parentwriters = 0
        self._filename = 'dirstate'
        self._pendingfilename = '%s.pending' % self._filename

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None
106
106
107 def beginparentchange(self):
107 def beginparentchange(self):
108 '''Marks the beginning of a set of changes that involve changing
108 '''Marks the beginning of a set of changes that involve changing
109 the dirstate parents. If there is an exception during this time,
109 the dirstate parents. If there is an exception during this time,
110 the dirstate will not be written when the wlock is released. This
110 the dirstate will not be written when the wlock is released. This
111 prevents writing an incoherent dirstate where the parent doesn't
111 prevents writing an incoherent dirstate where the parent doesn't
112 match the contents.
112 match the contents.
113 '''
113 '''
114 self._parentwriters += 1
114 self._parentwriters += 1
115
115
116 def endparentchange(self):
116 def endparentchange(self):
117 '''Marks the end of a set of changes that involve changing the
117 '''Marks the end of a set of changes that involve changing the
118 dirstate parents. Once all parent changes have been marked done,
118 dirstate parents. Once all parent changes have been marked done,
119 the wlock will be free to write the dirstate on release.
119 the wlock will be free to write the dirstate on release.
120 '''
120 '''
121 if self._parentwriters > 0:
121 if self._parentwriters > 0:
122 self._parentwriters -= 1
122 self._parentwriters -= 1
123
123
124 def pendingparentchange(self):
124 def pendingparentchange(self):
125 '''Returns true if the dirstate is in the middle of a set of changes
125 '''Returns true if the dirstate is in the middle of a set of changes
126 that modify the dirstate parent.
126 that modify the dirstate parent.
127 '''
127 '''
128 return self._parentwriters > 0
128 return self._parentwriters > 0
129
129
    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).'''
        # _read() assigns self._map (and self._copymap) as plain instance
        # attributes, which shadow these propertycaches from then on
        self._read()
        return self._map
136
136
    @propertycache
    def _copymap(self):
        # map of copy destination -> copy source; populated by _read(),
        # which installs the attribute directly (see _map above)
        self._read()
        return self._copymap
141
141
    @propertycache
    def _nonnormalset(self):
        # filenames whose state is not 'n' or whose mtime is unset
        return nonnormalentries(self._map)
145
145
    @propertycache
    def _filefoldmap(self):
        '''Map of normcase'd filename -> real filename for tracked files.'''
        try:
            # fast path: C implementation, if this parsers build has it
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            return makefilefoldmap(self._map, util.normcasespec,
                                   util.normcasefallback)

        # pure-Python fallback; entries in state 'r' (removed) are skipped
        f = {}
        normcase = util.normcase
        for name, s in self._map.iteritems():
            if s[0] != 'r':
                f[normcase(name)] = name
        f['.'] = '.' # prevents useless util.fspath() invocation
        return f
163
163
164 @propertycache
164 @propertycache
165 def _dirfoldmap(self):
165 def _dirfoldmap(self):
166 f = {}
166 f = {}
167 normcase = util.normcase
167 normcase = util.normcase
168 for name in self._dirs:
168 for name in self._dirs:
169 f[normcase(name)] = name
169 f[normcase(name)] = name
170 return f
170 return f
171
171
172 @repocache('branch')
172 @repocache('branch')
173 def _branch(self):
173 def _branch(self):
174 try:
174 try:
175 return self._opener.read("branch").strip() or "default"
175 return self._opener.read("branch").strip() or "default"
176 except IOError as inst:
176 except IOError as inst:
177 if inst.errno != errno.ENOENT:
177 if inst.errno != errno.ENOENT:
178 raise
178 raise
179 return "default"
179 return "default"
180
180
181 @propertycache
181 @propertycache
182 def _pl(self):
182 def _pl(self):
183 try:
183 try:
184 fp = self._opendirstatefile()
184 fp = self._opendirstatefile()
185 st = fp.read(40)
185 st = fp.read(40)
186 fp.close()
186 fp.close()
187 l = len(st)
187 l = len(st)
188 if l == 40:
188 if l == 40:
189 return st[:20], st[20:40]
189 return st[:20], st[20:40]
190 elif l > 0 and l < 40:
190 elif l > 0 and l < 40:
191 raise error.Abort(_('working directory state appears damaged!'))
191 raise error.Abort(_('working directory state appears damaged!'))
192 except IOError as err:
192 except IOError as err:
193 if err.errno != errno.ENOENT:
193 if err.errno != errno.ENOENT:
194 raise
194 raise
195 return [nullid, nullid]
195 return [nullid, nullid]
196
196
    @propertycache
    def _dirs(self):
        # multiset of directories containing tracked files, excluding
        # entries in state 'r' (removed)
        return util.dirs(self._map, 'r')
200
200
    def dirs(self):
        # public accessor for the cached directory multiset
        return self._dirs
203
203
    @rootcache('.hgignore')
    def _ignore(self):
        '''Return a matcher for ignored files (util.never when none apply).'''
        # _ignorefiles() (defined elsewhere in this class by this change)
        # yields .hgignore plus any ui.ignore/ui.ignore.* configured files
        files = self._ignorefiles()
        if not files:
            return util.never

        pats = ['include:%s' % f for f in files]
        return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
220
212
221 @propertycache
213 @propertycache
222 def _slash(self):
214 def _slash(self):
223 return self._ui.configbool('ui', 'slash') and os.sep != '/'
215 return self._ui.configbool('ui', 'slash') and os.sep != '/'
224
216
    @propertycache
    def _checklink(self):
        # whether the filesystem under the repo root supports symlinks
        return util.checklink(self._root)
228
220
    @propertycache
    def _checkexec(self):
        # whether the filesystem under the repo root honors exec bits
        return util.checkexec(self._root)
232
224
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (checkcase returns whether
        # the filesystem is case-sensitive, hence the negation)
        return not util.checkcase(self._join('.hg'))
236
228
    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        # (self._rootdir is guaranteed to end with a separator)
        return self._rootdir + f
241
233
    def flagfunc(self, buildfallback):
        '''Return a function mapping a tracked path to its 'l'/'x'/'' flags.

        buildfallback() is only called when the filesystem cannot answer
        for symlinks and/or exec bits itself.
        '''
        if self._checklink and self._checkexec:
            # filesystem answers both questions with a single lstat
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return 'l'
                    if util.statisexec(st):
                        return 'x'
                except OSError:
                    # missing file has no flags
                    pass
                return ''
            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks supported, exec bit not: consult fallback for 'x'
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            # exec bit supported, symlinks not: consult fallback for 'l'
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.isexec(self._join(x)):
                    return 'x'
                return ''
            return f
        else:
            # neither supported: the fallback answers everything
            return fallback
275
267
    @propertycache
    def _cwd(self):
        # may be pre-seeded by __init__ via the ui.forcecwd config
        return os.getcwd()
279
271
280 def getcwd(self):
272 def getcwd(self):
281 '''Return the path from which a canonical path is calculated.
273 '''Return the path from which a canonical path is calculated.
282
274
283 This path should be used to resolve file patterns or to convert
275 This path should be used to resolve file patterns or to convert
284 canonical paths back to file paths for display. It shouldn't be
276 canonical paths back to file paths for display. It shouldn't be
285 used to get real file paths. Use vfs functions instead.
277 used to get real file paths. Use vfs functions instead.
286 '''
278 '''
287 cwd = self._cwd
279 cwd = self._cwd
288 if cwd == self._root:
280 if cwd == self._root:
289 return ''
281 return ''
290 # self._root ends with a path separator if self._root is '/' or 'C:\'
282 # self._root ends with a path separator if self._root is '/' or 'C:\'
291 rootsep = self._root
283 rootsep = self._root
292 if not util.endswithsep(rootsep):
284 if not util.endswithsep(rootsep):
293 rootsep += os.sep
285 rootsep += os.sep
294 if cwd.startswith(rootsep):
286 if cwd.startswith(rootsep):
295 return cwd[len(rootsep):]
287 return cwd[len(rootsep):]
296 else:
288 else:
297 # we're outside the repo. return an absolute path.
289 # we're outside the repo. return an absolute path.
298 return cwd
290 return cwd
299
291
300 def pathto(self, f, cwd=None):
292 def pathto(self, f, cwd=None):
301 if cwd is None:
293 if cwd is None:
302 cwd = self.getcwd()
294 cwd = self.getcwd()
303 path = util.pathto(self._root, cwd, f)
295 path = util.pathto(self._root, cwd, f)
304 if self._slash:
296 if self._slash:
305 return util.pconvert(path)
297 return util.pconvert(path)
306 return path
298 return path
307
299
308 def __getitem__(self, key):
300 def __getitem__(self, key):
309 '''Return the current state of key (a filename) in the dirstate.
301 '''Return the current state of key (a filename) in the dirstate.
310
302
311 States are:
303 States are:
312 n normal
304 n normal
313 m needs merging
305 m needs merging
314 r marked for removal
306 r marked for removal
315 a marked for addition
307 a marked for addition
316 ? not tracked
308 ? not tracked
317 '''
309 '''
318 return self._map.get(key, ("?",))[0]
310 return self._map.get(key, ("?",))[0]
319
311
    def __contains__(self, key):
        # a file is "in" the dirstate when it has any entry, including 'r'
        return key in self._map
322
314
323 def __iter__(self):
315 def __iter__(self):
324 for x in sorted(self._map):
316 for x in sorted(self._map):
325 yield x
317 yield x
326
318
    def iteritems(self):
        # iterator over (filename, dirstatetuple) pairs, unsorted
        return self._map.iteritems()
329
321
330 def parents(self):
322 def parents(self):
331 return [self._validate(p) for p in self._pl]
323 return [self._validate(p) for p in self._pl]
332
324
    def p1(self):
        # validated first parent node
        return self._validate(self._pl[0])
335
327
    def p2(self):
        # validated second parent node (nullid outside a merge)
        return self._validate(self._pl[1])
338
330
    def branch(self):
        # branch name converted to the local encoding for display
        return encoding.tolocal(self._branch)
341
333
    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries a
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if self._parentwriters == 0:
            raise ValueError("cannot set dirstate parent without "
                             "calling dirstate.beginparentchange")

        self._dirty = self._dirtypl = True
        oldp2 = self._pl[1]
        self._pl = p1, p2
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            # leaving a merge: clean up merge-only markers
            for f, s in self._map.iteritems():
                # Discard 'm' markers when moving away from a merge state
                if s[0] == 'm':
                    if f in self._copymap:
                        # normallookup/add below drop the copy record;
                        # save it so the caller can restore it
                        copies[f] = self._copymap[f]
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == 'n' and s[2] == -2:
                    if f in self._copymap:
                        copies[f] = self._copymap[f]
                    self.add(f)
        return copies
372
364
    def setbranch(self, branch):
        # Persist the branch name (stored in UTF-8) to .hg/branch atomically.
        self._branch = encoding.fromlocal(branch)
        f = self._opener('branch', 'w', atomictemp=True)
        try:
            f.write(self._branch + '\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache['_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            # discard the atomictemp file so a partial write never lands
            f.discard()
            raise
388
380
    def _opendirstatefile(self):
        # Open the dirstate (or its '.pending' variant, per HG_PENDING).
        # All reads in one process must consistently see the same variant;
        # mixing them would yield an incoherent view, hence the abort.
        fp, mode = _trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            fp.close()
            raise error.Abort(_('working directory state may be '
                                'changed parallelly'))
        self._pendingmode = mode
        return fp
397
389
    def _read(self):
        # Parse the on-disk dirstate into self._map and self._copymap,
        # and (unless a parent change is in flight) self._pl.
        self._map = {}
        self._copymap = {}
        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            # no dirstate file: empty maps, null parents (from _pl)
            return
        if not st:
            return

        if util.safehasattr(parsers, 'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. From a linear regression on a set of real-world repos,
            # all over 10,000 files, the size of a dirstate entry is 85
            # bytes. The cost of resizing is significantly higher than the cost
            # of filling in a larger presized dict, so subtract 20% from the
            # size.
            #
            # This heuristic is imperfect in many ways, so in a future dirstate
            # format update it makes sense to just record the number of entries
            # on write.
            self._map = parsers.dict_new_presized(len(st) / 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self._copymap, st)
        if not self._dirtypl:
            # don't clobber parents set by a pending setparents()
            self._pl = p
443
435
444 def invalidate(self):
436 def invalidate(self):
445 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
437 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
446 "_pl", "_dirs", "_ignore", "_nonnormalset"):
438 "_pl", "_dirs", "_ignore", "_nonnormalset"):
447 if a in self.__dict__:
439 if a in self.__dict__:
448 delattr(self, a)
440 delattr(self, a)
449 self._lastnormaltime = 0
441 self._lastnormaltime = 0
450 self._dirty = False
442 self._dirty = False
451 self._parentwriters = 0
443 self._parentwriters = 0
452
444
453 def copy(self, source, dest):
445 def copy(self, source, dest):
454 """Mark dest as a copy of source. Unmark dest if source is None."""
446 """Mark dest as a copy of source. Unmark dest if source is None."""
455 if source == dest:
447 if source == dest:
456 return
448 return
457 self._dirty = True
449 self._dirty = True
458 if source is not None:
450 if source is not None:
459 self._copymap[dest] = source
451 self._copymap[dest] = source
460 elif dest in self._copymap:
452 elif dest in self._copymap:
461 del self._copymap[dest]
453 del self._copymap[dest]
462
454
    def copied(self, file):
        # copy source of 'file', or None when it is not a copy destination
        return self._copymap.get(file, None)
465
457
    def copies(self):
        # the raw destination -> source copy map (mutable, shared)
        return self._copymap
468
460
469 def _droppath(self, f):
461 def _droppath(self, f):
470 if self[f] not in "?r" and "_dirs" in self.__dict__:
462 if self[f] not in "?r" and "_dirs" in self.__dict__:
471 self._dirs.delpath(f)
463 self._dirs.delpath(f)
472
464
473 if "_filefoldmap" in self.__dict__:
465 if "_filefoldmap" in self.__dict__:
474 normed = util.normcase(f)
466 normed = util.normcase(f)
475 if normed in self._filefoldmap:
467 if normed in self._filefoldmap:
476 del self._filefoldmap[normed]
468 del self._filefoldmap[normed]
477
469
    def _addpath(self, f, state, mode, size, mtime):
        # Insert or update the dirstate entry for f, enforcing the
        # file/directory exclusivity invariants and keeping the caches
        # (_dirs, _nonnormalset) coherent.
        oldstate = self[f]
        if state == 'a' or oldstate == 'r':
            scmutil.checkfilename(f)
            # a path cannot be both a file and a directory
            if f in self._dirs:
                raise error.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in util.finddirs(f):
                if d in self._dirs:
                    break
                if d in self._map and self[d] != 'r':
                    raise error.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            # f becomes tracked: account for its parent directories
            self._dirs.addpath(f)
        self._dirty = True
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        if state != 'n' or mtime == -1:
            self._nonnormalset.add(f)
497
489
    def normal(self, f):
        '''Mark a file normal and clean.'''
        s = os.lstat(self._join(f))
        mtime = s.st_mtime
        # size and time are masked to 31 bits for the on-disk format
        self._addpath(f, 'n', s.st_mode,
                      s.st_size & _rangemask, mtime & _rangemask)
        if f in self._copymap:
            del self._copymap[f]
        if f in self._nonnormalset:
            self._nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
513
505
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid and f in self._map:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map[f]
            if entry[0] == 'r' and entry[2] in (-1, -2):
                source = self._copymap.get(f)
                if entry[2] == -1:
                    self.merge(f)
                elif entry[2] == -2:
                    self.otherparent(f)
                if source:
                    # merge()/otherparent() dropped the copy record;
                    # reinstate it
                    self.copy(source, f)
                return
            if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                # already in a merge-ish state: leave it untouched
                return
        # mtime -1 forces a content comparison on the next status()
        self._addpath(f, 'n', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]
        if f in self._nonnormalset:
            self._nonnormalset.remove(f)
537
529
538 def otherparent(self, f):
530 def otherparent(self, f):
539 '''Mark as coming from the other parent, always dirty.'''
531 '''Mark as coming from the other parent, always dirty.'''
540 if self._pl[1] == nullid:
532 if self._pl[1] == nullid:
541 raise error.Abort(_("setting %r to other parent "
533 raise error.Abort(_("setting %r to other parent "
542 "only allowed in merges") % f)
534 "only allowed in merges") % f)
543 if f in self and self[f] == 'n':
535 if f in self and self[f] == 'n':
544 # merge-like
536 # merge-like
545 self._addpath(f, 'm', 0, -2, -1)
537 self._addpath(f, 'm', 0, -2, -1)
546 else:
538 else:
547 # add-like
539 # add-like
548 self._addpath(f, 'n', 0, -2, -1)
540 self._addpath(f, 'n', 0, -2, -1)
549
541
550 if f in self._copymap:
542 if f in self._copymap:
551 del self._copymap[f]
543 del self._copymap[f]
552
544
553 def add(self, f):
545 def add(self, f):
554 '''Mark a file added.'''
546 '''Mark a file added.'''
555 self._addpath(f, 'a', 0, -1, -1)
547 self._addpath(f, 'a', 0, -1, -1)
556 if f in self._copymap:
548 if f in self._copymap:
557 del self._copymap[f]
549 del self._copymap[f]
558
550
    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        self._droppath(f)
        size = 0
        if self._pl[1] != nullid and f in self._map:
            # backup the previous state
            entry = self._map[f]
            if entry[0] == 'm': # merge
                size = -1
            elif entry[0] == 'n' and entry[2] == -2: # other parent
                size = -2
        self._map[f] = dirstatetuple('r', 0, size, 0)
        self._nonnormalset.add(f)
        if size == 0 and f in self._copymap:
            # nonzero size encodes merge state normallookup() can restore,
            # so only discard the copy record in the plain case
            del self._copymap[f]
575
567
576 def merge(self, f):
568 def merge(self, f):
577 '''Mark a file merged.'''
569 '''Mark a file merged.'''
578 if self._pl[1] == nullid:
570 if self._pl[1] == nullid:
579 return self.normallookup(f)
571 return self.normallookup(f)
580 return self.otherparent(f)
572 return self.otherparent(f)
581
573
582 def drop(self, f):
574 def drop(self, f):
583 '''Drop a file from the dirstate'''
575 '''Drop a file from the dirstate'''
584 if f in self._map:
576 if f in self._map:
585 self._dirty = True
577 self._dirty = True
586 self._droppath(f)
578 self._droppath(f)
587 del self._map[f]
579 del self._map[f]
588 if f in self._nonnormalset:
580 if f in self._nonnormalset:
589 self._nonnormalset.remove(f)
581 self._nonnormalset.remove(f)
590
582
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Determine the on-disk case folding of 'path' and cache it.

        'normed' is the case-normalized form of 'path'.  'exists' may be
        passed in to avoid an extra lexists() call; None means unknown.
        The discovered folding is stored under 'normed' in 'storemap'
        only when the path exists on disk.  Returns the folded path.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and '/' in path:
                d, f = path.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + "/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if '/' in normed:
                d, f = normed.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + "/" + d
                folded = d + "/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # cache only existing paths; missing ones may appear later
            # with a different case
            storemap[normed] = folded

        return folded
616
608
617 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
609 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
618 normed = util.normcase(path)
610 normed = util.normcase(path)
619 folded = self._filefoldmap.get(normed, None)
611 folded = self._filefoldmap.get(normed, None)
620 if folded is None:
612 if folded is None:
621 if isknown:
613 if isknown:
622 folded = path
614 folded = path
623 else:
615 else:
624 folded = self._discoverpath(path, normed, ignoremissing, exists,
616 folded = self._discoverpath(path, normed, ignoremissing, exists,
625 self._filefoldmap)
617 self._filefoldmap)
626 return folded
618 return folded
627
619
628 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
620 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
629 normed = util.normcase(path)
621 normed = util.normcase(path)
630 folded = self._filefoldmap.get(normed, None)
622 folded = self._filefoldmap.get(normed, None)
631 if folded is None:
623 if folded is None:
632 folded = self._dirfoldmap.get(normed, None)
624 folded = self._dirfoldmap.get(normed, None)
633 if folded is None:
625 if folded is None:
634 if isknown:
626 if isknown:
635 folded = path
627 folded = path
636 else:
628 else:
637 # store discovered result in dirfoldmap so that future
629 # store discovered result in dirfoldmap so that future
638 # normalizefile calls don't start matching directories
630 # normalizefile calls don't start matching directories
639 folded = self._discoverpath(path, normed, ignoremissing, exists,
631 folded = self._discoverpath(path, normed, ignoremissing, exists,
640 self._dirfoldmap)
632 self._dirfoldmap)
641 return folded
633 return folded
642
634
643 def normalize(self, path, isknown=False, ignoremissing=False):
635 def normalize(self, path, isknown=False, ignoremissing=False):
644 '''
636 '''
645 normalize the case of a pathname when on a casefolding filesystem
637 normalize the case of a pathname when on a casefolding filesystem
646
638
647 isknown specifies whether the filename came from walking the
639 isknown specifies whether the filename came from walking the
648 disk, to avoid extra filesystem access.
640 disk, to avoid extra filesystem access.
649
641
650 If ignoremissing is True, missing path are returned
642 If ignoremissing is True, missing path are returned
651 unchanged. Otherwise, we try harder to normalize possibly
643 unchanged. Otherwise, we try harder to normalize possibly
652 existing path components.
644 existing path components.
653
645
654 The normalized case is determined based on the following precedence:
646 The normalized case is determined based on the following precedence:
655
647
656 - version of name already stored in the dirstate
648 - version of name already stored in the dirstate
657 - version of name stored on disk
649 - version of name stored on disk
658 - version provided via command arguments
650 - version provided via command arguments
659 '''
651 '''
660
652
661 if self._checkcase:
653 if self._checkcase:
662 return self._normalize(path, isknown, ignoremissing)
654 return self._normalize(path, isknown, ignoremissing)
663 return path
655 return path
664
656
665 def clear(self):
657 def clear(self):
666 self._map = {}
658 self._map = {}
667 self._nonnormalset = set()
659 self._nonnormalset = set()
668 if "_dirs" in self.__dict__:
660 if "_dirs" in self.__dict__:
669 delattr(self, "_dirs")
661 delattr(self, "_dirs")
670 self._copymap = {}
662 self._copymap = {}
671 self._pl = [nullid, nullid]
663 self._pl = [nullid, nullid]
672 self._lastnormaltime = 0
664 self._lastnormaltime = 0
673 self._dirty = True
665 self._dirty = True
674
666
675 def rebuild(self, parent, allfiles, changedfiles=None):
667 def rebuild(self, parent, allfiles, changedfiles=None):
676 if changedfiles is None:
668 if changedfiles is None:
677 # Rebuild entire dirstate
669 # Rebuild entire dirstate
678 changedfiles = allfiles
670 changedfiles = allfiles
679 lastnormaltime = self._lastnormaltime
671 lastnormaltime = self._lastnormaltime
680 self.clear()
672 self.clear()
681 self._lastnormaltime = lastnormaltime
673 self._lastnormaltime = lastnormaltime
682
674
683 for f in changedfiles:
675 for f in changedfiles:
684 mode = 0o666
676 mode = 0o666
685 if f in allfiles and 'x' in allfiles.flags(f):
677 if f in allfiles and 'x' in allfiles.flags(f):
686 mode = 0o777
678 mode = 0o777
687
679
688 if f in allfiles:
680 if f in allfiles:
689 self._map[f] = dirstatetuple('n', mode, -1, 0)
681 self._map[f] = dirstatetuple('n', mode, -1, 0)
690 else:
682 else:
691 self._map.pop(f, None)
683 self._map.pop(f, None)
692 if f in self._nonnormalset:
684 if f in self._nonnormalset:
693 self._nonnormalset.remove(f)
685 self._nonnormalset.remove(f)
694
686
695 self._pl = (parent, nullid)
687 self._pl = (parent, nullid)
696 self._dirty = True
688 self._dirty = True
697
689
    def write(self, tr=False):
        """Write the dirstate out, either immediately or via transaction tr.

        With a transaction, writing is delayed through a file generator;
        entries whose mtime equals the filesystem 'now' are marked
        ambiguous (mtime -1) first.  With tr is False (legacy callers) a
        devel warning may be emitted and the pending file is honored.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr is False: # not explicitly specified
            if (self._ui.configbool('devel', 'all-warnings')
                or self._ui.configbool('devel', 'check-dirstate-write')):
                self._ui.develwarn('use dirstate.write with '
                                   'repo.currenttransaction()')

            if self._opener.lexists(self._pendingfilename):
                # if pending file already exists, in-memory changes
                # should be written into it, because it has priority
                # to '.hg/dirstate' at reading under HG_PENDING mode
                filename = self._pendingfilename
        elif tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            dmap = self._map
            for f, e in dmap.iteritems():
                if e[0] == 'n' and e[3] == now:
                    # mtime -1 forces a later lookup for this entry
                    dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
                    self._nonnormalset.add(f)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0

            # delay writing in-memory changes out
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')
            return

        # immediate write path (tr is False): atomic replace of the file
        st = self._opener(filename, "w", atomictemp=True)
        self._writedirstate(st)
739
731
    def _writedirstate(self, st):
        """Serialize the dirstate into the open file object 'st'.

        Optionally sleeps (debug.dirstate.delaywrite) so that no entry
        shares its mtime with the write itself, then packs the map and
        resets the dirty flags.
        """
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st).st_mtime & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in self._map.iteritems():
                if e[0] == 'n' and e[3] == now:
                    import time # to avoid useless import
                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    break

        st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
        # recompute the non-normal set: pack_dirstate may have mutated entries
        self._nonnormalset = nonnormalentries(self._map)
        st.close()
        self._lastnormaltime = 0
        self._dirty = self._dirtypl = False
766
758
767 def _dirignore(self, f):
759 def _dirignore(self, f):
768 if f == '.':
760 if f == '.':
769 return False
761 return False
770 if self._ignore(f):
762 if self._ignore(f):
771 return True
763 return True
772 for p in util.finddirs(f):
764 for p in util.finddirs(f):
773 if self._ignore(p):
765 if self._ignore(p):
774 return True
766 return True
775 return False
767 return False
776
768
769 def _ignorefiles(self):
770 files = []
771 if os.path.exists(self._join('.hgignore')):
772 files.append(self._join('.hgignore'))
773 for name, path in self._ui.configitems("ui"):
774 if name == 'ignore' or name.startswith('ignore.'):
775 # we need to use os.path.join here rather than self._join
776 # because path is arbitrary and user-specified
777 files.append(os.path.join(self._rootdir, util.expandpath(path)))
778 return files
779
    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            # translate an st_mode we cannot track into a human message
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        # bind frequently-used attributes to locals for the loop below
        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo; the subrepo
        # itself is reported instead (both lists are sorted for the merge)
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or '.' in files:
            files = ['.']
        results = dict.fromkeys(subrepos)
        results['.hg'] = None

        alldirs = None
        for ff in files:
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['.']
            if normalize and ff != '.':
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if alldirs is None:
                        alldirs = util.dirs(dmap)
                    if nf in alldirs:
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, inst.strerror)

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group result paths by their case-normalized form
            for f, st in results.iteritems():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            # for ambiguous groups, keep only the on-disk spelling
            for norm, paths in normed.iteritems():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(path, norm, True, None,
                                                    self._dirfoldmap)
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
906
909
    def walk(self, match, subrepos, unknown, ignored, full=True):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        '''
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # choose ignore predicates based on what the caller wants listed
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # bind hot attributes to locals for the traversal loops
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = osutil.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                nd = work.pop()
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    # unreadable/vanished dirs are reported, not fatal
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd), inst.strerror)
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # subrepo/.hg placeholders were only needed to suppress traversal
        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = dmap.keys()
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                nf = iter(visit).next
                for st in util.statfiles([join(i) for i in visit]):
                    results[nf()] = st
        return results
1068
1071
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        # Stash the listing flags under new names: the 'ignored', 'clean' and
        # 'unknown' parameter names are reused below for the result lists.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        # Hoist attribute/method lookups out of the per-file loop; this is a
        # hot path, so each saved LOAD_ATTR matters.
        dmap = self._map
        ladd = lookup.append            # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                # fn is not tracked: it is either ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in "nma":
                # tracked ('n'ormal/'m'erged/'a'dded) but gone from disk
                dadd(fn)
            elif state == 'n':
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))
1160
1163
1161 def matches(self, match):
1164 def matches(self, match):
1162 '''
1165 '''
1163 return files in the dirstate (in whatever state) filtered by match
1166 return files in the dirstate (in whatever state) filtered by match
1164 '''
1167 '''
1165 dmap = self._map
1168 dmap = self._map
1166 if match.always():
1169 if match.always():
1167 return dmap.keys()
1170 return dmap.keys()
1168 files = match.files()
1171 files = match.files()
1169 if match.isexact():
1172 if match.isexact():
1170 # fast path -- filter the other way around, since typically files is
1173 # fast path -- filter the other way around, since typically files is
1171 # much smaller than dmap
1174 # much smaller than dmap
1172 return [f for f in files if f in dmap]
1175 return [f for f in files if f in dmap]
1173 if match.prefix() and all(fn in dmap for fn in files):
1176 if match.prefix() and all(fn in dmap for fn in files):
1174 # fast path -- all the values are known to be files, so just return
1177 # fast path -- all the values are known to be files, so just return
1175 # that
1178 # that
1176 return list(files)
1179 return list(files)
1177 return [f for f in dmap if match(f)]
1180 return [f for f in dmap if match(f)]
1178
1181
1179 def _actualfilename(self, tr):
1182 def _actualfilename(self, tr):
1180 if tr:
1183 if tr:
1181 return self._pendingfilename
1184 return self._pendingfilename
1182 else:
1185 else:
1183 return self._filename
1186 return self._filename
1184
1187
    def _savebackup(self, tr, suffix):
        '''Save current dirstate into backup file with suffix

        tr is the running transaction, or None; it selects whether the
        pending or the regular dirstate file is written and backed up.
        suffix is appended to that filename to form the backup name.
        '''
        filename = self._actualfilename(tr)

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        self._writedirstate(self._opener(filename, "w", atomictemp=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        # finally, copy the (possibly pending) dirstate into the backup file
        self._opener.write(filename + suffix, self._opener.tryread(filename))
1207
1210
1208 def _restorebackup(self, tr, suffix):
1211 def _restorebackup(self, tr, suffix):
1209 '''Restore dirstate by backup file with suffix'''
1212 '''Restore dirstate by backup file with suffix'''
1210 # this "invalidate()" prevents "wlock.release()" from writing
1213 # this "invalidate()" prevents "wlock.release()" from writing
1211 # changes of dirstate out after restoring from backup file
1214 # changes of dirstate out after restoring from backup file
1212 self.invalidate()
1215 self.invalidate()
1213 filename = self._actualfilename(tr)
1216 filename = self._actualfilename(tr)
1214 self._opener.rename(filename + suffix, filename)
1217 self._opener.rename(filename + suffix, filename)
1215
1218
1216 def _clearbackup(self, tr, suffix):
1219 def _clearbackup(self, tr, suffix):
1217 '''Clear backup file with suffix'''
1220 '''Clear backup file with suffix'''
1218 filename = self._actualfilename(tr)
1221 filename = self._actualfilename(tr)
1219 self._opener.unlink(filename + suffix)
1222 self._opener.unlink(filename + suffix)
General Comments 0
You need to be logged in to leave comments. Login now