##// END OF EJS Templates
dirstate: call the C implementation of nonnonormalentries when available...
Laurent Charignon -
r27593:bc97b9af default
parent child Browse files
Show More
@@ -1,1216 +1,1219 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import stat
12 import stat
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import nullid
15 from .node import nullid
16 from . import (
16 from . import (
17 encoding,
17 encoding,
18 error,
18 error,
19 match as matchmod,
19 match as matchmod,
20 osutil,
20 osutil,
21 parsers,
21 parsers,
22 pathutil,
22 pathutil,
23 scmutil,
23 scmutil,
24 util,
24 util,
25 )
25 )
26
26
# Shortcuts to frequently used helpers from sibling modules.
propertycache = util.propertycache
filecache = scmutil.filecache
# Mask used to clamp size/mtime values to 31 bits for on-disk storage.
_rangemask = 0x7fffffff

# Entry type for dirstate records (C implementation when available).
dirstatetuple = parsers.dirstatetuple
32
32
class repocache(filecache):
    """filecache for files in .hg/"""
    def join(self, obj, fname):
        # Resolve fname relative to the repository's .hg/ directory.
        return obj._opener.join(fname)
37
37
class rootcache(filecache):
    """filecache for files in the repository root"""
    def join(self, obj, fname):
        # Resolve fname relative to the repository root.
        return obj._join(fname)
42
42
43 def _getfsnow(vfs):
43 def _getfsnow(vfs):
44 '''Get "now" timestamp on filesystem'''
44 '''Get "now" timestamp on filesystem'''
45 tmpfd, tmpname = vfs.mkstemp()
45 tmpfd, tmpname = vfs.mkstemp()
46 try:
46 try:
47 return os.fstat(tmpfd).st_mtime
47 return os.fstat(tmpfd).st_mtime
48 finally:
48 finally:
49 os.close(tmpfd)
49 os.close(tmpfd)
50 vfs.unlink(tmpname)
50 vfs.unlink(tmpname)
51
51
def nonnormalentries(dmap):
    '''Compute the nonnormal dirstate entries from the dmap'''
    try:
        # Fast path: the C parsers module may provide this directly.
        return parsers.nonnormalentries(dmap)
    except AttributeError:
        # Pure-Python fallback: collect entries that are not in state 'n'
        # or whose mtime is unset (-1).
        return {fname for fname, e in dmap.iteritems()
                if e[0] != 'n' or e[3] == -1}
59
57 def _trypending(root, vfs, filename):
60 def _trypending(root, vfs, filename):
58 '''Open file to be read according to HG_PENDING environment variable
61 '''Open file to be read according to HG_PENDING environment variable
59
62
60 This opens '.pending' of specified 'filename' only when HG_PENDING
63 This opens '.pending' of specified 'filename' only when HG_PENDING
61 is equal to 'root'.
64 is equal to 'root'.
62
65
63 This returns '(fp, is_pending_opened)' tuple.
66 This returns '(fp, is_pending_opened)' tuple.
64 '''
67 '''
65 if root == os.environ.get('HG_PENDING'):
68 if root == os.environ.get('HG_PENDING'):
66 try:
69 try:
67 return (vfs('%s.pending' % filename), True)
70 return (vfs('%s.pending' % filename), True)
68 except IOError as inst:
71 except IOError as inst:
69 if inst.errno != errno.ENOENT:
72 if inst.errno != errno.ENOENT:
70 raise
73 raise
71 return (vfs(filename), False)
74 return (vfs(filename), False)
72
75
73 class dirstate(object):
76 class dirstate(object):
74
77
    def __init__(self, opener, ui, root, validate):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # internal config: ui.forcecwd
        forcecwd = ui.config('ui', 'forcecwd')
        if forcecwd:
            # Pre-setting the instance attribute shadows the _cwd
            # propertycache below.
            self._cwd = forcecwd
        self._dirty = False
        self._dirtypl = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # Nesting depth of beginparentchange()/endparentchange() calls.
        self._parentwriters = 0
        self._filename = 'dirstate'
        self._pendingfilename = '%s.pending' % self._filename

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None
103
106
    def beginparentchange(self):
        '''Marks the beginning of a set of changes that involve changing
        the dirstate parents. If there is an exception during this time,
        the dirstate will not be written when the wlock is released. This
        prevents writing an incoherent dirstate where the parent doesn't
        match the contents.
        '''
        # Calls may nest; see endparentchange() for the matching decrement.
        self._parentwriters += 1
112
115
113 def endparentchange(self):
116 def endparentchange(self):
114 '''Marks the end of a set of changes that involve changing the
117 '''Marks the end of a set of changes that involve changing the
115 dirstate parents. Once all parent changes have been marked done,
118 dirstate parents. Once all parent changes have been marked done,
116 the wlock will be free to write the dirstate on release.
119 the wlock will be free to write the dirstate on release.
117 '''
120 '''
118 if self._parentwriters > 0:
121 if self._parentwriters > 0:
119 self._parentwriters -= 1
122 self._parentwriters -= 1
120
123
    def pendingparentchange(self):
        '''Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        '''
        return self._parentwriters > 0
126
129
    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).'''
        # _read() assigns self._map as a side effect, replacing this
        # propertycache slot.
        self._read()
        return self._map
133
136
    @propertycache
    def _copymap(self):
        # Map of copy destination -> copy source; populated by _read().
        self._read()
        return self._copymap
138
141
    @propertycache
    def _nonnormalset(self):
        # Set of files not in state 'n' or with an unset (-1) mtime;
        # kept up to date incrementally by _addpath/normal/remove/drop.
        return nonnormalentries(self._map)
142
145
    @propertycache
    def _filefoldmap(self):
        # Map of case-normalized name -> actual tracked filename.
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            # Fast path: C implementation available.
            return makefilefoldmap(self._map, util.normcasespec,
                                   util.normcasefallback)

        f = {}
        normcase = util.normcase
        for name, s in self._map.iteritems():
            # Skip entries marked removed ('r').
            if s[0] != 'r':
                f[normcase(name)] = name
        f['.'] = '.' # prevents useless util.fspath() invocation
        return f
160
163
161 @propertycache
164 @propertycache
162 def _dirfoldmap(self):
165 def _dirfoldmap(self):
163 f = {}
166 f = {}
164 normcase = util.normcase
167 normcase = util.normcase
165 for name in self._dirs:
168 for name in self._dirs:
166 f[normcase(name)] = name
169 f[normcase(name)] = name
167 return f
170 return f
168
171
    @repocache('branch')
    def _branch(self):
        # Current branch name read from .hg/branch; 'default' when the
        # file is missing or empty.
        try:
            return self._opener.read("branch").strip() or "default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return "default"
177
180
    @propertycache
    def _pl(self):
        # Working directory parents: two 20-byte binary nodeids stored in
        # the first 40 bytes of the dirstate file.
        try:
            fp = self._opendirstatefile()
            st = fp.read(40)
            fp.close()
            l = len(st)
            if l == 40:
                return st[:20], st[20:40]
            elif l > 0 and l < 40:
                # A short non-empty read means a truncated/corrupt file.
                raise error.Abort(_('working directory state appears damaged!'))
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        # No dirstate file (or zero-length): both parents are null.
        return [nullid, nullid]
193
196
    @propertycache
    def _dirs(self):
        # Directory structure built from the tracked files; 'r' is passed
        # to util.dirs (presumably the state to skip — confirm there).
        return util.dirs(self._map, 'r')
197
200
    def dirs(self):
        # Public accessor for the cached directory structure.
        return self._dirs
200
203
    @rootcache('.hgignore')
    def _ignore(self):
        # Build a matcher for ignored files from .hgignore plus any
        # ui.ignore / ui.ignore.* configured files.
        files = []
        if os.path.exists(self._join('.hgignore')):
            files.append(self._join('.hgignore'))
        for name, path in self._ui.configitems("ui"):
            if name == 'ignore' or name.startswith('ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))

        if not files:
            # Nothing configured: match nothing.
            return util.never

        pats = ['include:%s' % f for f in files]
        return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
217
220
    @propertycache
    def _slash(self):
        # True when ui.slash is set and the OS separator is not '/',
        # i.e. displayed paths should be converted to '/' form.
        return self._ui.configbool('ui', 'slash') and os.sep != '/'
221
224
    @propertycache
    def _checklink(self):
        # Whether the repository filesystem supports symlinks.
        return util.checklink(self._root)
225
228
    @propertycache
    def _checkexec(self):
        # Whether the repository filesystem honors the executable bit.
        return util.checkexec(self._root)
229
232
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed against '.hg').
        return not util.checkcase(self._join('.hg'))
233
236
    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
238
241
    def flagfunc(self, buildfallback):
        '''Return a function mapping a filename to its 'l'/'x'/'' flag.

        When the filesystem supports both symlinks and the exec bit the
        flags are read via lstat; otherwise the matcher produced by
        buildfallback() supplies the missing capability (or both).
        '''
        if self._checklink and self._checkexec:
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return 'l'
                    if util.statisexec(st):
                        return 'x'
                except OSError:
                    # Missing/unreadable file: no flags.
                    pass
                return ''
            return f

        fallback = buildfallback()
        if self._checklink:
            # Symlinks are real; exec bit comes from the fallback.
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            # Exec bit is real; symlink flag comes from the fallback.
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.isexec(self._join(x)):
                    return 'x'
                return ''
            return f
        else:
            # Neither capability available: use the fallback wholesale.
            return fallback
272
275
    @propertycache
    def _cwd(self):
        # May be shadowed by an instance attribute set from ui.forcecwd
        # in __init__.
        return os.getcwd()
276
279
    def getcwd(self):
        '''Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        '''
        cwd = self._cwd
        if cwd == self._root:
            # At the repo root: the relative prefix is empty.
            return ''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += os.sep
        if cwd.startswith(rootsep):
            # Inside the repo: return the path relative to the root.
            return cwd[len(rootsep):]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
296
299
    def pathto(self, f, cwd=None):
        # Return repo-relative path f expressed relative to cwd, with
        # '/' separators when ui.slash is in effect.
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path
304
307
    def __getitem__(self, key):
        '''Return the current state of key (a filename) in the dirstate.

        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition
        ?  not tracked
        '''
        # Untracked files fall back to the one-element default tuple.
        return self._map.get(key, ("?",))[0]
316
319
    def __contains__(self, key):
        # A file is "in" the dirstate iff it has an entry in the map.
        return key in self._map
319
322
320 def __iter__(self):
323 def __iter__(self):
321 for x in sorted(self._map):
324 for x in sorted(self._map):
322 yield x
325 yield x
323
326
    def iteritems(self):
        # Iterate (filename, dirstatetuple) pairs in map order.
        return self._map.iteritems()
326
329
    def parents(self):
        # Both working directory parents, run through the validator.
        return [self._validate(p) for p in self._pl]
329
332
    def p1(self):
        # First working directory parent, validated.
        return self._validate(self._pl[0])
332
335
    def p2(self):
        # Second working directory parent, validated (nullid if none).
        return self._validate(self._pl[1])
335
338
    def branch(self):
        # Branch name converted to the local encoding for display.
        return encoding.tolocal(self._branch)
338
341
    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries a
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if self._parentwriters == 0:
            raise ValueError("cannot set dirstate parent without "
                             "calling dirstate.beginparentchange")

        self._dirty = self._dirtypl = True
        oldp2 = self._pl[1]
        self._pl = p1, p2
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            # Leaving a merge state: clean up per-file merge markers.
            for f, s in self._map.iteritems():
                # Discard 'm' markers when moving away from a merge state
                if s[0] == 'm':
                    if f in self._copymap:
                        copies[f] = self._copymap[f]
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == 'n' and s[2] == -2:
                    if f in self._copymap:
                        copies[f] = self._copymap[f]
                    self.add(f)
        return copies
369
372
    def setbranch(self, branch):
        # Persist the branch name atomically to .hg/branch.
        self._branch = encoding.fromlocal(branch)
        f = self._opener('branch', 'w', atomictemp=True)
        try:
            f.write(self._branch + '\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache['_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            # Abandon the atomic temp file before propagating the error.
            f.discard()
            raise
385
388
    def _opendirstatefile(self):
        # Open the dirstate (or its '.pending' variant, per HG_PENDING)
        # and refuse to mix pending/non-pending reads within one session.
        fp, mode = _trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            fp.close()
            raise error.Abort(_('working directory state may be '
                                'changed parallelly'))
        self._pendingmode = mode
        return fp
394
397
    def _read(self):
        # Parse the dirstate file into self._map / self._copymap and,
        # unless parents were already changed in memory (_dirtypl),
        # into self._pl as well.
        self._map = {}
        self._copymap = {}
        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        if util.safehasattr(parsers, 'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. From a linear regression on a set of real-world repos,
            # all over 10,000 files, the size of a dirstate entry is 85
            # bytes. The cost of resizing is significantly higher than the cost
            # of filling in a larger presized dict, so subtract 20% from the
            # size.
            #
            # This heuristic is imperfect in many ways, so in a future dirstate
            # format update it makes sense to just record the number of entries
            # on write.
            self._map = parsers.dict_new_presized(len(st) / 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self._copymap, st)
        if not self._dirtypl:
            self._pl = p
440
443
    def invalidate(self):
        # Drop all cached state so it is re-read from disk on next access.
        for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
                  "_pl", "_dirs", "_ignore", "_nonnormalset"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._parentwriters = 0
449
452
450 def copy(self, source, dest):
453 def copy(self, source, dest):
451 """Mark dest as a copy of source. Unmark dest if source is None."""
454 """Mark dest as a copy of source. Unmark dest if source is None."""
452 if source == dest:
455 if source == dest:
453 return
456 return
454 self._dirty = True
457 self._dirty = True
455 if source is not None:
458 if source is not None:
456 self._copymap[dest] = source
459 self._copymap[dest] = source
457 elif dest in self._copymap:
460 elif dest in self._copymap:
458 del self._copymap[dest]
461 del self._copymap[dest]
459
462
    def copied(self, file):
        # Return the copy source for 'file', or None if it is not a copy.
        return self._copymap.get(file, None)
462
465
    def copies(self):
        # Expose the raw copy map (destination -> source).
        return self._copymap
465
468
    def _droppath(self, f):
        # Keep the cached _dirs and _filefoldmap structures consistent
        # when f leaves the set of tracked files.
        if self[f] not in "?r" and "_dirs" in self.__dict__:
            self._dirs.delpath(f)

        if "_filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            if normed in self._filefoldmap:
                del self._filefoldmap[normed]
474
477
    def _addpath(self, f, state, mode, size, mtime):
        # Record f with the given dirstate entry, validating the filename
        # when the file is newly added or resurrected from 'r'.
        oldstate = self[f]
        if state == 'a' or oldstate == 'r':
            scmutil.checkfilename(f)
            if f in self._dirs:
                raise error.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in util.finddirs(f):
                if d in self._dirs:
                    break
                if d in self._map and self[d] != 'r':
                    raise error.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            self._dirs.addpath(f)
        self._dirty = True
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        if state != 'n' or mtime == -1:
            # Keep the incremental nonnormal set in sync with the map.
            self._nonnormalset.add(f)
494
497
495 def normal(self, f):
498 def normal(self, f):
496 '''Mark a file normal and clean.'''
499 '''Mark a file normal and clean.'''
497 s = os.lstat(self._join(f))
500 s = os.lstat(self._join(f))
498 mtime = s.st_mtime
501 mtime = s.st_mtime
499 self._addpath(f, 'n', s.st_mode,
502 self._addpath(f, 'n', s.st_mode,
500 s.st_size & _rangemask, mtime & _rangemask)
503 s.st_size & _rangemask, mtime & _rangemask)
501 if f in self._copymap:
504 if f in self._copymap:
502 del self._copymap[f]
505 del self._copymap[f]
503 if f in self._nonnormalset:
506 if f in self._nonnormalset:
504 self._nonnormalset.remove(f)
507 self._nonnormalset.remove(f)
505 if mtime > self._lastnormaltime:
508 if mtime > self._lastnormaltime:
506 # Remember the most recent modification timeslot for status(),
509 # Remember the most recent modification timeslot for status(),
507 # to make sure we won't miss future size-preserving file content
510 # to make sure we won't miss future size-preserving file content
508 # modifications that happen within the same timeslot.
511 # modifications that happen within the same timeslot.
509 self._lastnormaltime = mtime
512 self._lastnormaltime = mtime
510
513
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid and f in self._map:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map[f]
            if entry[0] == 'r' and entry[2] in (-1, -2):
                source = self._copymap.get(f)
                if entry[2] == -1:
                    self.merge(f)
                elif entry[2] == -2:
                    self.otherparent(f)
                if source:
                    # merge()/otherparent() dropped the copy record;
                    # restore it.
                    self.copy(source, f)
                return
            if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                # Already merged / from other parent: nothing to do.
                return
        self._addpath(f, 'n', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]
        if f in self._nonnormalset:
            self._nonnormalset.remove(f)
534
537
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if self._pl[1] == nullid:
            raise error.Abort(_("setting %r to other parent "
                                "only allowed in merges") % f)
        if f in self and self[f] == 'n':
            # merge-like
            self._addpath(f, 'm', 0, -2, -1)
        else:
            # add-like
            self._addpath(f, 'n', 0, -2, -1)

        if f in self._copymap:
            del self._copymap[f]
549
552
550 def add(self, f):
553 def add(self, f):
551 '''Mark a file added.'''
554 '''Mark a file added.'''
552 self._addpath(f, 'a', 0, -1, -1)
555 self._addpath(f, 'a', 0, -1, -1)
553 if f in self._copymap:
556 if f in self._copymap:
554 del self._copymap[f]
557 del self._copymap[f]
555
558
    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        self._droppath(f)
        size = 0
        if self._pl[1] != nullid and f in self._map:
            # backup the previous state
            entry = self._map[f]
            if entry[0] == 'm': # merge
                size = -1
            elif entry[0] == 'n' and entry[2] == -2: # other parent
                size = -2
        self._map[f] = dirstatetuple('r', 0, size, 0)
        self._nonnormalset.add(f)
        if size == 0 and f in self._copymap:
            # Plain removal (no merge state to preserve): drop the copy
            # record too.
            del self._copymap[f]
572
575
573 def merge(self, f):
576 def merge(self, f):
574 '''Mark a file merged.'''
577 '''Mark a file merged.'''
575 if self._pl[1] == nullid:
578 if self._pl[1] == nullid:
576 return self.normallookup(f)
579 return self.normallookup(f)
577 return self.otherparent(f)
580 return self.otherparent(f)
578
581
579 def drop(self, f):
582 def drop(self, f):
580 '''Drop a file from the dirstate'''
583 '''Drop a file from the dirstate'''
581 if f in self._map:
584 if f in self._map:
582 self._dirty = True
585 self._dirty = True
583 self._droppath(f)
586 self._droppath(f)
584 del self._map[f]
587 del self._map[f]
585 if f in self._nonnormalset:
588 if f in self._nonnormalset:
586 self._nonnormalset.remove(f)
589 self._nonnormalset.remove(f)
587
590
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Discover the filesystem case of *path* and cache it in *storemap*.

        *normed* is the case-normalized form of *path*.  *exists* may be
        passed by the caller to avoid an extra lexists() check (None means
        "unknown, look it up").  When the path is missing and
        *ignoremissing* is False, leading directory components are still
        normalized so partially-existing paths fold correctly.
        Returns the case-folded path.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and '/' in path:
                d, f = path.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + "/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if '/' in normed:
                d, f = normed.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + "/" + d
                folded = d + "/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that actually exist on disk
            storemap[normed] = folded

        return folded
613
616
614 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
617 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
615 normed = util.normcase(path)
618 normed = util.normcase(path)
616 folded = self._filefoldmap.get(normed, None)
619 folded = self._filefoldmap.get(normed, None)
617 if folded is None:
620 if folded is None:
618 if isknown:
621 if isknown:
619 folded = path
622 folded = path
620 else:
623 else:
621 folded = self._discoverpath(path, normed, ignoremissing, exists,
624 folded = self._discoverpath(path, normed, ignoremissing, exists,
622 self._filefoldmap)
625 self._filefoldmap)
623 return folded
626 return folded
624
627
625 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
628 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
626 normed = util.normcase(path)
629 normed = util.normcase(path)
627 folded = self._filefoldmap.get(normed, None)
630 folded = self._filefoldmap.get(normed, None)
628 if folded is None:
631 if folded is None:
629 folded = self._dirfoldmap.get(normed, None)
632 folded = self._dirfoldmap.get(normed, None)
630 if folded is None:
633 if folded is None:
631 if isknown:
634 if isknown:
632 folded = path
635 folded = path
633 else:
636 else:
634 # store discovered result in dirfoldmap so that future
637 # store discovered result in dirfoldmap so that future
635 # normalizefile calls don't start matching directories
638 # normalizefile calls don't start matching directories
636 folded = self._discoverpath(path, normed, ignoremissing, exists,
639 folded = self._discoverpath(path, normed, ignoremissing, exists,
637 self._dirfoldmap)
640 self._dirfoldmap)
638 return folded
641 return folded
639
642
640 def normalize(self, path, isknown=False, ignoremissing=False):
643 def normalize(self, path, isknown=False, ignoremissing=False):
641 '''
644 '''
642 normalize the case of a pathname when on a casefolding filesystem
645 normalize the case of a pathname when on a casefolding filesystem
643
646
644 isknown specifies whether the filename came from walking the
647 isknown specifies whether the filename came from walking the
645 disk, to avoid extra filesystem access.
648 disk, to avoid extra filesystem access.
646
649
647 If ignoremissing is True, missing path are returned
650 If ignoremissing is True, missing path are returned
648 unchanged. Otherwise, we try harder to normalize possibly
651 unchanged. Otherwise, we try harder to normalize possibly
649 existing path components.
652 existing path components.
650
653
651 The normalized case is determined based on the following precedence:
654 The normalized case is determined based on the following precedence:
652
655
653 - version of name already stored in the dirstate
656 - version of name already stored in the dirstate
654 - version of name stored on disk
657 - version of name stored on disk
655 - version provided via command arguments
658 - version provided via command arguments
656 '''
659 '''
657
660
658 if self._checkcase:
661 if self._checkcase:
659 return self._normalize(path, isknown, ignoremissing)
662 return self._normalize(path, isknown, ignoremissing)
660 return path
663 return path
661
664
662 def clear(self):
665 def clear(self):
663 self._map = {}
666 self._map = {}
664 self._nonnormalset = set()
667 self._nonnormalset = set()
665 if "_dirs" in self.__dict__:
668 if "_dirs" in self.__dict__:
666 delattr(self, "_dirs")
669 delattr(self, "_dirs")
667 self._copymap = {}
670 self._copymap = {}
668 self._pl = [nullid, nullid]
671 self._pl = [nullid, nullid]
669 self._lastnormaltime = 0
672 self._lastnormaltime = 0
670 self._dirty = True
673 self._dirty = True
671
674
672 def rebuild(self, parent, allfiles, changedfiles=None):
675 def rebuild(self, parent, allfiles, changedfiles=None):
673 if changedfiles is None:
676 if changedfiles is None:
674 # Rebuild entire dirstate
677 # Rebuild entire dirstate
675 changedfiles = allfiles
678 changedfiles = allfiles
676 lastnormaltime = self._lastnormaltime
679 lastnormaltime = self._lastnormaltime
677 self.clear()
680 self.clear()
678 self._lastnormaltime = lastnormaltime
681 self._lastnormaltime = lastnormaltime
679
682
680 for f in changedfiles:
683 for f in changedfiles:
681 mode = 0o666
684 mode = 0o666
682 if f in allfiles and 'x' in allfiles.flags(f):
685 if f in allfiles and 'x' in allfiles.flags(f):
683 mode = 0o777
686 mode = 0o777
684
687
685 if f in allfiles:
688 if f in allfiles:
686 self._map[f] = dirstatetuple('n', mode, -1, 0)
689 self._map[f] = dirstatetuple('n', mode, -1, 0)
687 else:
690 else:
688 self._map.pop(f, None)
691 self._map.pop(f, None)
689 if f in self._nonnormalset:
692 if f in self._nonnormalset:
690 self._nonnormalset.remove(f)
693 self._nonnormalset.remove(f)
691
694
692 self._pl = (parent, nullid)
695 self._pl = (parent, nullid)
693 self._dirty = True
696 self._dirty = True
694
697
    def write(self, tr=False):
        """Write the dirstate out, or schedule the write on transaction *tr*.

        *tr* is False when the caller did not explicitly pass a
        transaction (a deprecated calling convention that triggers a
        devel warning), None for "write immediately", or a transaction
        object to delay the write via a file generator.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr is False: # not explicitly specified
            if (self._ui.configbool('devel', 'all-warnings')
                or self._ui.configbool('devel', 'check-dirstate-write')):
                self._ui.develwarn('use dirstate.write with '
                                   'repo.currenttransaction()')

            if self._opener.lexists(self._pendingfilename):
                # if pending file already exists, in-memory changes
                # should be written into it, because it has priority
                # to '.hg/dirstate' at reading under HG_PENDING mode
                filename = self._pendingfilename
        elif tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            dmap = self._map
            for f, e in dmap.iteritems():
                if e[0] == 'n' and e[3] == now:
                    # mtime == -1 makes the entry ambiguous, so it must
                    # also be tracked in the non-normal set
                    dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
                    self._nonnormalset.add(f)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0

            # delay writing in-memory changes out
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')
            return

        # immediate write path (tr is None)
        st = self._opener(filename, "w", atomictemp=True)
        self._writedirstate(st)
736
739
    def _writedirstate(self, st):
        """Serialize the dirstate into open file *st* and clear dirty flags.

        Also refreshes the non-normal set from the freshly packed map and
        optionally sleeps (debug.dirstate.delaywrite) so that no entry's
        mtime can equal 'now', avoiding ambiguous-timestamp races in tests.
        """
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st).st_mtime & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in self._map.iteritems():
                # 'n' state with mtime == now is the ambiguous case
                if e[0] == 'n' and e[3] == now:
                    import time # to avoid useless import
                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    break

        st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
        # pack_dirstate may have rewritten entries; recompute the cache
        self._nonnormalset = nonnormalentries(self._map)
        st.close()
        self._lastnormaltime = 0
        self._dirty = self._dirtypl = False
763
766
764 def _dirignore(self, f):
767 def _dirignore(self, f):
765 if f == '.':
768 if f == '.':
766 return False
769 return False
767 if self._ignore(f):
770 if self._ignore(f):
768 return True
771 return True
769 for p in util.finddirs(f):
772 for p in util.finddirs(f):
770 if self._ignore(p):
773 if self._ignore(p):
771 return True
774 return True
772 return False
775 return False
773
776
    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            # human-readable description for a file type we cannot track
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        # hoist frequently used attributes into locals
        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo; both lists are
        # sorted so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or '.' in files:
            files = ['.']
        # subrepos and .hg are always "present" but carry no stat data
        results = dict.fromkeys(subrepos)
        results['.hg'] = None

        alldirs = None
        for ff in files:
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['.']
            if normalize and ff != '.':
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if alldirs is None:
                        # lazily built: only needed when something is missing
                        alldirs = util.dirs(dmap)
                    if nf in alldirs:
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, inst.strerror)

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group the stat'ed results by their case-normalized name
            for f, st in results.iteritems():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.iteritems():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(path, norm, True, None,
                                                    self._dirfoldmap)
                        if path != folded:
                            # wrong-case alias: keep the entry but drop the stat
                            results[path] = None

        return results, dirsfound, dirsnotfound
903
906
    def walk(self, match, subrepos, unknown, ignored, full=True):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        '''
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # pick ignore predicates according to which listings were requested
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # hoist frequently used attributes into locals
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = osutil.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # iterative depth-first traversal using `work` as a stack
            wadd = work.append
            while work:
                nd = work.pop()
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd), inst.strerror)
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # neither dir nor regular file on disk, but tracked
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # subrepo/.hg placeholders were only needed during traversal
        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = dmap.keys()
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                nf = iter(visit).next
                for st in util.statfiles([join(i) for i in visit]):
                    results[nf()] = st
        return results
1065
1068
def status(self, match, subrepos, ignored, clean, unknown):
    '''Compare the working directory against the dirstate.

    Return a pair of (unsure, status), where status is of type
    scmutil.status and:

    unsure:
      files that might have been modified since the dirstate was
      written, but need to be read to be sure (size is the same
      but mtime differs)
    status.modified:
      files that have definitely been modified since the dirstate
      was written (different size or mode)
    status.clean:
      files that have definitely not been modified since the
      dirstate was written
    '''
    listignored, listclean, listunknown = ignored, clean, unknown
    lookup, modified, added, unknown, ignored = [], [], [], [], []
    removed, deleted, clean = [], [], []

    # Hoist attribute lookups out of the hot loop below.
    dmap = self._map
    copymap = self._copymap
    dirignore = self._dirignore
    checkexec = self._checkexec
    lastnormaltime = self._lastnormaltime
    mexact = match.exact

    # Bind the list append methods once; the loop is performance critical.
    ladd = lookup.append # aka "unsure"
    madd = modified.append
    aadd = added.append
    uadd = unknown.append
    iadd = ignored.append
    radd = removed.append
    dadd = deleted.append
    cadd = clean.append

    # A full walk is required when either
    # - we're listing all clean files, or
    # - match.traversedir does something, because match.traversedir should
    #   be called for every dir in the working dir
    full = listclean or match.traversedir is not None
    walkresults = self.walk(match, subrepos, listunknown, listignored,
                            full=full)
    for fn, st in walkresults.iteritems():
        if fn not in dmap:
            if (listignored or mexact(fn)) and dirignore(fn):
                if listignored:
                    iadd(fn)
            else:
                uadd(fn)
            continue

        # Unpack dmap[fn] member by member instead of via tuple
        # assignment: dmap[fn] is not a real Python tuple in compiled
        # builds, so the fast UNPACK_SEQUENCE paths don't apply and a
        # full-fledged iterator would be created, which is much slower.
        entry = dmap[fn]
        state = entry[0]
        mode = entry[1]
        size = entry[2]
        time = entry[3]

        if not st and state in "nma":
            dadd(fn)
        elif state == 'n':
            if (size >= 0 and
                ((size != st.st_size and size != st.st_size & _rangemask)
                 or ((mode ^ st.st_mode) & 0o100 and checkexec))
                or size == -2 # other parent
                or fn in copymap):
                madd(fn)
            elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                ladd(fn)
            elif st.st_mtime == lastnormaltime:
                # fn may have just been marked as normal and it may have
                # changed in the same second without changing its size.
                # This can happen if we quickly do multiple commits.
                # Force lookup, so we don't miss such a racy file change.
                ladd(fn)
            elif listclean:
                cadd(fn)
        elif state == 'm':
            madd(fn)
        elif state == 'a':
            aadd(fn)
        elif state == 'r':
            radd(fn)

    return (lookup, scmutil.status(modified, added, removed, deleted,
                                   unknown, ignored, clean))
1157
1160
def matches(self, match):
    '''
    return files in the dirstate (in whatever state) filtered by match
    '''
    dmap = self._map
    if match.always():
        # trivial matcher: every tracked file is a hit
        return dmap.keys()
    wanted = match.files()
    if match.isexact():
        # fast path -- the explicit file list is typically much smaller
        # than dmap, so filter in that direction instead
        return [fn for fn in wanted if fn in dmap]
    if match.prefix() and all(fn in dmap for fn in wanted):
        # fast path -- every pattern is a known tracked file, so the
        # pattern list itself is the answer
        return list(wanted)
    return [fn for fn in dmap if match(fn)]
1175
1178
1176 def _actualfilename(self, tr):
1179 def _actualfilename(self, tr):
1177 if tr:
1180 if tr:
1178 return self._pendingfilename
1181 return self._pendingfilename
1179 else:
1182 else:
1180 return self._filename
1183 return self._filename
1181
1184
1182 def _savebackup(self, tr, suffix):
1185 def _savebackup(self, tr, suffix):
1183 '''Save current dirstate into backup file with suffix'''
1186 '''Save current dirstate into backup file with suffix'''
1184 filename = self._actualfilename(tr)
1187 filename = self._actualfilename(tr)
1185
1188
1186 # use '_writedirstate' instead of 'write' to write changes certainly,
1189 # use '_writedirstate' instead of 'write' to write changes certainly,
1187 # because the latter omits writing out if transaction is running.
1190 # because the latter omits writing out if transaction is running.
1188 # output file will be used to create backup of dirstate at this point.
1191 # output file will be used to create backup of dirstate at this point.
1189 self._writedirstate(self._opener(filename, "w", atomictemp=True))
1192 self._writedirstate(self._opener(filename, "w", atomictemp=True))
1190
1193
1191 if tr:
1194 if tr:
1192 # ensure that subsequent tr.writepending returns True for
1195 # ensure that subsequent tr.writepending returns True for
1193 # changes written out above, even if dirstate is never
1196 # changes written out above, even if dirstate is never
1194 # changed after this
1197 # changed after this
1195 tr.addfilegenerator('dirstate', (self._filename,),
1198 tr.addfilegenerator('dirstate', (self._filename,),
1196 self._writedirstate, location='plain')
1199 self._writedirstate, location='plain')
1197
1200
1198 # ensure that pending file written above is unlinked at
1201 # ensure that pending file written above is unlinked at
1199 # failure, even if tr.writepending isn't invoked until the
1202 # failure, even if tr.writepending isn't invoked until the
1200 # end of this transaction
1203 # end of this transaction
1201 tr.registertmp(filename, location='plain')
1204 tr.registertmp(filename, location='plain')
1202
1205
1203 self._opener.write(filename + suffix, self._opener.tryread(filename))
1206 self._opener.write(filename + suffix, self._opener.tryread(filename))
1204
1207
1205 def _restorebackup(self, tr, suffix):
1208 def _restorebackup(self, tr, suffix):
1206 '''Restore dirstate by backup file with suffix'''
1209 '''Restore dirstate by backup file with suffix'''
1207 # this "invalidate()" prevents "wlock.release()" from writing
1210 # this "invalidate()" prevents "wlock.release()" from writing
1208 # changes of dirstate out after restoring from backup file
1211 # changes of dirstate out after restoring from backup file
1209 self.invalidate()
1212 self.invalidate()
1210 filename = self._actualfilename(tr)
1213 filename = self._actualfilename(tr)
1211 self._opener.rename(filename + suffix, filename)
1214 self._opener.rename(filename + suffix, filename)
1212
1215
1213 def _clearbackup(self, tr, suffix):
1216 def _clearbackup(self, tr, suffix):
1214 '''Clear backup file with suffix'''
1217 '''Clear backup file with suffix'''
1215 filename = self._actualfilename(tr)
1218 filename = self._actualfilename(tr)
1216 self._opener.unlink(filename + suffix)
1219 self._opener.unlink(filename + suffix)
General Comments 0
You need to be logged in to leave comments. Login now