##// END OF EJS Templates
dirstate: use tryunlink
Ryan McElroy -
r31547:ddadb6b0 default
parent child Browse files
Show More
@@ -1,1287 +1,1286 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import errno
11 import errno
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import nullid
16 from .node import nullid
17 from . import (
17 from . import (
18 encoding,
18 encoding,
19 error,
19 error,
20 match as matchmod,
20 match as matchmod,
21 osutil,
21 osutil,
22 parsers,
22 parsers,
23 pathutil,
23 pathutil,
24 pycompat,
24 pycompat,
25 scmutil,
25 scmutil,
26 txnutil,
26 txnutil,
27 util,
27 util,
28 )
28 )
29
29
# Convenience aliases for heavily used helpers from sibling modules.
propertycache = util.propertycache
filecache = scmutil.filecache
# Mask used to fold size/mtime values into the signed 31-bit range that the
# on-disk dirstate format can store.
_rangemask = 0x7fffffff

# C-implemented (state, mode, size, mtime) tuple type for dirstate entries.
dirstatetuple = parsers.dirstatetuple
35
35
class repocache(filecache):
    """filecache for files in .hg/"""
    def join(self, obj, fname):
        # Resolve fname against the repository's .hg/ directory via the
        # dirstate's opener.
        return obj._opener.join(fname)
40
40
class rootcache(filecache):
    """filecache for files in the repository root"""
    def join(self, obj, fname):
        # Resolve fname against the working directory root.
        return obj._join(fname)
45
45
def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    # Create a scratch file and read its mtime so the result reflects the
    # clock/granularity of the filesystem backing 'vfs' rather than
    # time.time().
    tmpfd, tmpname = vfs.mkstemp()
    try:
        return os.fstat(tmpfd).st_mtime
    finally:
        # Always clean up the temporary file, even if fstat raises.
        os.close(tmpfd)
        vfs.unlink(tmpname)
54
54
def nonnormalentries(dmap):
    '''Compute the nonnormal dirstate entries from the dmap'''
    try:
        # Prefer the C implementation when the parsers module provides it.
        return parsers.nonnormalotherparententries(dmap)
    except AttributeError:
        # Pure-Python fallback: a "nonnormal" entry is anything not in
        # state 'n' or with an unset (-1) mtime; an "otherparent" entry is
        # a normal entry whose size field is the -2 marker.
        nonnorm = set(fname for fname, e in dmap.iteritems()
                      if e[0] != 'n' or e[3] == -1)
        otherparent = set(fname for fname, e in dmap.iteritems()
                          if e[0] == 'n' and e[2] == -2)
        return nonnorm, otherparent
68
68
69 class dirstate(object):
69 class dirstate(object):
70
70
    def __init__(self, opener, ui, root, validate):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # internal config: ui.forcecwd
        forcecwd = ui.config('ui', 'forcecwd')
        if forcecwd:
            # Overrides the _cwd propertycache so getcwd() uses this value.
            self._cwd = forcecwd
        # _dirty: in-memory state differs from what is on disk
        self._dirty = False
        # _dirtypl: the parents (_pl) specifically have been changed
        self._dirtypl = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # Depth of nested beginparentchange/endparentchange calls.
        self._parentwriters = 0
        self._filename = 'dirstate'
        self._pendingfilename = '%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None
102
102
    def beginparentchange(self):
        '''Marks the beginning of a set of changes that involve changing
        the dirstate parents. If there is an exception during this time,
        the dirstate will not be written when the wlock is released. This
        prevents writing an incoherent dirstate where the parent doesn't
        match the contents.
        '''
        # Calls may nest; endparentchange decrements the counter.
        self._parentwriters += 1
111
111
    def endparentchange(self):
        '''Marks the end of a set of changes that involve changing the
        dirstate parents. Once all parent changes have been marked done,
        the wlock will be free to write the dirstate on release.
        '''
        # Guard against unbalanced calls: never go below zero.
        if self._parentwriters > 0:
            self._parentwriters -= 1
119
119
    def pendingparentchange(self):
        '''Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        '''
        return self._parentwriters > 0
125
125
    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).'''
        # _read() assigns self._map directly, replacing this propertycache.
        self._read()
        return self._map
132
132
    @propertycache
    def _copymap(self):
        # Map of destination filename -> copy source; populated by _read(),
        # which assigns self._copymap directly.
        self._read()
        return self._copymap
137
137
    @propertycache
    def _nonnormalset(self):
        # Both sets come from one pass over the map; cache the sibling too.
        nonnorm, otherparents = nonnormalentries(self._map)
        self._otherparentset = otherparents
        return nonnorm
143
143
    @propertycache
    def _otherparentset(self):
        # Both sets come from one pass over the map; cache the sibling too.
        nonnorm, otherparents = nonnormalentries(self._map)
        self._nonnormalset = nonnorm
        return otherparents
149
149
    @propertycache
    def _filefoldmap(self):
        """Map of normcased filename -> on-disk filename for tracked files."""
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            # Fast path: C implementation is available.
            return makefilefoldmap(self._map, util.normcasespec,
                                   util.normcasefallback)

        f = {}
        normcase = util.normcase
        for name, s in self._map.iteritems():
            if s[0] != 'r':  # skip entries marked for removal
                f[normcase(name)] = name
        f['.'] = '.' # prevents useless util.fspath() invocation
        return f
167
167
168 @propertycache
168 @propertycache
169 def _dirfoldmap(self):
169 def _dirfoldmap(self):
170 f = {}
170 f = {}
171 normcase = util.normcase
171 normcase = util.normcase
172 for name in self._dirs:
172 for name in self._dirs:
173 f[normcase(name)] = name
173 f[normcase(name)] = name
174 return f
174 return f
175
175
    @repocache('branch')
    def _branch(self):
        """Return the branch name stored in .hg/branch, or 'default'."""
        try:
            return self._opener.read("branch").strip() or "default"
        except IOError as inst:
            # A missing branch file means the default branch; anything
            # else is a real error.
            if inst.errno != errno.ENOENT:
                raise
            return "default"
184
184
    @propertycache
    def _pl(self):
        """Return the working directory parents as a pair of 20-byte nodes.

        Reads only the first 40 bytes of the dirstate file (two binary
        node ids). A missing file means an empty working directory.
        """
        try:
            fp = self._opendirstatefile()
            st = fp.read(40)
            fp.close()
            l = len(st)
            if l == 40:
                return st[:20], st[20:40]
            elif l > 0 and l < 40:
                # A short read means a truncated/corrupt dirstate.
                raise error.Abort(_('working directory state appears damaged!'))
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        # No dirstate file (or zero-length): both parents are null.
        return [nullid, nullid]
200
200
    @propertycache
    def _dirs(self):
        # Multiset of directories containing tracked files; entries in
        # state 'r' are excluded.
        return util.dirs(self._map, 'r')
204
204
    def dirs(self):
        """Public accessor for the tracked-directory structure."""
        return self._dirs
207
207
    @rootcache('.hgignore')
    def _ignore(self):
        """Return a matcher for ignored files built from all ignore files."""
        files = self._ignorefiles()
        if not files:
            # No ignore files configured: nothing is ever ignored.
            return util.never

        pats = ['include:%s' % f for f in files]
        return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
216
216
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' even though the OS
        # separator differs (ui.slash on e.g. Windows).
        return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
220
220
    @propertycache
    def _checklink(self):
        # Whether the filesystem under the repo root supports symlinks.
        return util.checklink(self._root)
224
224
    @propertycache
    def _checkexec(self):
        # Whether the filesystem under the repo root honors the exec bit.
        return util.checkexec(self._root)
228
228
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed via the .hg dir).
        return not util.fscasesensitive(self._join('.hg'))
232
232
    def _join(self, f):
        """Return the absolute path for repo-relative path f."""
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
237
237
    def flagfunc(self, buildfallback):
        """Return a function mapping a filename to its flags ('l', 'x', '').

        When the filesystem supports both symlinks and the exec bit, flags
        are read with a single lstat. Otherwise the matcher built by
        buildfallback() supplies whichever flag the filesystem cannot
        represent.
        """
        if self._checklink and self._checkexec:
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return 'l'
                    if util.statisexec(st):
                        return 'x'
                except OSError:
                    # Missing/unreadable file: report no flags.
                    pass
                return ''
            return f

        fallback = buildfallback()
        if self._checklink:
            # Symlinks are real; exec bit comes from the fallback.
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            # Exec bit is real; symlink flag comes from the fallback.
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.isexec(self._join(x)):
                    return 'x'
                return ''
            return f
        else:
            # Neither is supported: everything comes from the fallback.
            return fallback
271
271
    @propertycache
    def _cwd(self):
        # May have been pre-set from ui.forcecwd in __init__.
        return pycompat.getcwd()
275
275
    def getcwd(self):
        '''Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        '''
        cwd = self._cwd
        if cwd == self._root:
            return ''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # Inside the repo: return the path relative to the root.
            return cwd[len(rootsep):]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
295
295
296 def pathto(self, f, cwd=None):
296 def pathto(self, f, cwd=None):
297 if cwd is None:
297 if cwd is None:
298 cwd = self.getcwd()
298 cwd = self.getcwd()
299 path = util.pathto(self._root, cwd, f)
299 path = util.pathto(self._root, cwd, f)
300 if self._slash:
300 if self._slash:
301 return util.pconvert(path)
301 return util.pconvert(path)
302 return path
302 return path
303
303
304 def __getitem__(self, key):
304 def __getitem__(self, key):
305 '''Return the current state of key (a filename) in the dirstate.
305 '''Return the current state of key (a filename) in the dirstate.
306
306
307 States are:
307 States are:
308 n normal
308 n normal
309 m needs merging
309 m needs merging
310 r marked for removal
310 r marked for removal
311 a marked for addition
311 a marked for addition
312 ? not tracked
312 ? not tracked
313 '''
313 '''
314 return self._map.get(key, ("?",))[0]
314 return self._map.get(key, ("?",))[0]
315
315
    def __contains__(self, key):
        """Return True if key (a filename) has a dirstate entry."""
        return key in self._map
318
318
319 def __iter__(self):
319 def __iter__(self):
320 for x in sorted(self._map):
320 for x in sorted(self._map):
321 yield x
321 yield x
322
322
    def iteritems(self):
        """Iterate over (filename, dirstatetuple) pairs, unsorted."""
        return self._map.iteritems()
325
325
    def parents(self):
        """Return both working directory parents, validated."""
        return [self._validate(p) for p in self._pl]
328
328
    def p1(self):
        """Return the first working directory parent, validated."""
        return self._validate(self._pl[0])
331
331
    def p2(self):
        """Return the second working directory parent, validated."""
        return self._validate(self._pl[1])
334
334
    def branch(self):
        """Return the current branch name in the local encoding."""
        return encoding.tolocal(self._branch)
337
337
    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries a
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if self._parentwriters == 0:
            raise ValueError("cannot set dirstate parent without "
                             "calling dirstate.beginparentchange")

        self._dirty = self._dirtypl = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # Remember the original parents for change callbacks.
            self._origpl = self._pl
        self._pl = p1, p2
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            # Leaving a merge: only nonnormal/otherparent entries can carry
            # merge-specific markers that need fixing up.
            candidatefiles = self._nonnormalset.union(self._otherparentset)
            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard 'm' markers when moving away from a merge state
                if s[0] == 'm':
                    if f in self._copymap:
                        copies[f] = self._copymap[f]
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == 'n' and s[2] == -2:
                    if f in self._copymap:
                        copies[f] = self._copymap[f]
                    self.add(f)
        return copies
375
375
    def setbranch(self, branch):
        """Set the current branch, persisting it to .hg/branch."""
        self._branch = encoding.fromlocal(branch)
        f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + '\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache['_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            # Drop the atomictemp file so a failed write leaves no trace.
            f.discard()
            raise
391
391
    def _opendirstatefile(self):
        """Open the dirstate file, preferring a pending transaction copy.

        Aborts if a later open would switch between the pending and the
        committed file, which would yield an inconsistent view.
        """
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            fp.close()
            raise error.Abort(_('working directory state may be '
                                'changed parallelly'))
        # Remember which variant was read for consistency checks.
        self._pendingmode = mode
        return fp
400
400
    def _read(self):
        """Parse the dirstate file into self._map/_copymap and parents."""
        self._map = {}
        self._copymap = {}
        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            # No dirstate file: empty maps are the correct answer.
            return
        if not st:
            return

        if util.safehasattr(parsers, 'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. From a linear regression on a set of real-world repos,
            # all over 10,000 files, the size of a dirstate entry is 85
            # bytes. The cost of resizing is significantly higher than the cost
            # of filling in a larger presized dict, so subtract 20% from the
            # size.
            #
            # This heuristic is imperfect in many ways, so in a future dirstate
            # format update it makes sense to just record the number of entries
            # on write.
            self._map = parsers.dict_new_presized(len(st) / 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self._copymap, st)
        if not self._dirtypl:
            # Only adopt the parents from disk if they were not already
            # changed in memory via setparents().
            self._pl = p
446
446
    def invalidate(self):
        """Discard all cached state so it is re-read from disk on next use."""
        # Drop every propertycache/filecache attribute that mirrors on-disk
        # state; they will be lazily recomputed.
        for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
                  "_pl", "_dirs", "_ignore", "_nonnormalset",
                  "_otherparentset"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
458
458
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            # Self-copies carry no information; ignore them.
            return
        self._dirty = True
        if source is not None:
            self._copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif dest in self._copymap:
            # source is None: forget any recorded copy for dest.
            del self._copymap[dest]
            self._updatedfiles.add(dest)
471
471
    def copied(self, file):
        """Return the copy source of file, or None if it is not a copy."""
        return self._copymap.get(file, None)
474
474
    def copies(self):
        """Return the full destination -> source copy map."""
        return self._copymap
477
477
    def _droppath(self, f):
        """Update auxiliary caches when f stops being tracked."""
        # Only tracked states ('n', 'm', 'a') contribute to the dirs
        # multiset; skip untracked ('?') and removed ('r') entries.
        if self[f] not in "?r" and "_dirs" in self.__dict__:
            self._dirs.delpath(f)

        if "_filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            if normed in self._filefoldmap:
                del self._filefoldmap[normed]

        self._updatedfiles.add(f)
488
488
    def _addpath(self, f, state, mode, size, mtime):
        """Record entry (state, mode, size, mtime) for f, keeping caches
        and file/directory clash invariants consistent."""
        oldstate = self[f]
        if state == 'a' or oldstate == 'r':
            # Only validate names that are (re)entering tracked state.
            scmutil.checkfilename(f)
            if f in self._dirs:
                raise error.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in util.finddirs(f):
                if d in self._dirs:
                    break
                if d in self._map and self[d] != 'r':
                    raise error.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            # f becomes tracked: account for its parent directories.
            self._dirs.addpath(f)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        if state != 'n' or mtime == -1:
            self._nonnormalset.add(f)
        if size == -2:
            # -2 size marks "from other parent" entries.
            self._otherparentset.add(f)
511
511
    def normal(self, f):
        '''Mark a file normal and clean.'''
        # Take the entry's mode/size/mtime from the file on disk.
        s = os.lstat(self._join(f))
        mtime = s.st_mtime
        self._addpath(f, 'n', s.st_mode,
                      s.st_size & _rangemask, mtime & _rangemask)
        if f in self._copymap:
            del self._copymap[f]
        if f in self._nonnormalset:
            self._nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
527
527
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid and f in self._map:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map[f]
            if entry[0] == 'r' and entry[2] in (-1, -2):
                source = self._copymap.get(f)
                if entry[2] == -1:
                    self.merge(f)
                elif entry[2] == -2:
                    self.otherparent(f)
                if source:
                    # merge()/otherparent() cleared the copy record;
                    # restore it.
                    self.copy(source, f)
                return
            if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                # Already carrying a merge marker: leave it untouched.
                return
        # mtime -1 forces status() to re-examine the file's contents.
        self._addpath(f, 'n', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]
        if f in self._nonnormalset:
            self._nonnormalset.remove(f)
551
551
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if self._pl[1] == nullid:
            raise error.Abort(_("setting %r to other parent "
                                "only allowed in merges") % f)
        if f in self and self[f] == 'n':
            # merge-like
            self._addpath(f, 'm', 0, -2, -1)
        else:
            # add-like
            self._addpath(f, 'n', 0, -2, -1)

        if f in self._copymap:
            del self._copymap[f]
566
566
567 def add(self, f):
567 def add(self, f):
568 '''Mark a file added.'''
568 '''Mark a file added.'''
569 self._addpath(f, 'a', 0, -1, -1)
569 self._addpath(f, 'a', 0, -1, -1)
570 if f in self._copymap:
570 if f in self._copymap:
571 del self._copymap[f]
571 del self._copymap[f]
572
572
573 def remove(self, f):
573 def remove(self, f):
574 '''Mark a file removed.'''
574 '''Mark a file removed.'''
575 self._dirty = True
575 self._dirty = True
576 self._droppath(f)
576 self._droppath(f)
577 size = 0
577 size = 0
578 if self._pl[1] != nullid and f in self._map:
578 if self._pl[1] != nullid and f in self._map:
579 # backup the previous state
579 # backup the previous state
580 entry = self._map[f]
580 entry = self._map[f]
581 if entry[0] == 'm': # merge
581 if entry[0] == 'm': # merge
582 size = -1
582 size = -1
583 elif entry[0] == 'n' and entry[2] == -2: # other parent
583 elif entry[0] == 'n' and entry[2] == -2: # other parent
584 size = -2
584 size = -2
585 self._otherparentset.add(f)
585 self._otherparentset.add(f)
586 self._map[f] = dirstatetuple('r', 0, size, 0)
586 self._map[f] = dirstatetuple('r', 0, size, 0)
587 self._nonnormalset.add(f)
587 self._nonnormalset.add(f)
588 if size == 0 and f in self._copymap:
588 if size == 0 and f in self._copymap:
589 del self._copymap[f]
589 del self._copymap[f]
590
590
591 def merge(self, f):
591 def merge(self, f):
592 '''Mark a file merged.'''
592 '''Mark a file merged.'''
593 if self._pl[1] == nullid:
593 if self._pl[1] == nullid:
594 return self.normallookup(f)
594 return self.normallookup(f)
595 return self.otherparent(f)
595 return self.otherparent(f)
596
596
597 def drop(self, f):
597 def drop(self, f):
598 '''Drop a file from the dirstate'''
598 '''Drop a file from the dirstate'''
599 if f in self._map:
599 if f in self._map:
600 self._dirty = True
600 self._dirty = True
601 self._droppath(f)
601 self._droppath(f)
602 del self._map[f]
602 del self._map[f]
603 if f in self._nonnormalset:
603 if f in self._nonnormalset:
604 self._nonnormalset.remove(f)
604 self._nonnormalset.remove(f)
605 if f in self._copymap:
605 if f in self._copymap:
606 del self._copymap[f]
606 del self._copymap[f]
607
607
608 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
608 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
609 if exists is None:
609 if exists is None:
610 exists = os.path.lexists(os.path.join(self._root, path))
610 exists = os.path.lexists(os.path.join(self._root, path))
611 if not exists:
611 if not exists:
612 # Maybe a path component exists
612 # Maybe a path component exists
613 if not ignoremissing and '/' in path:
613 if not ignoremissing and '/' in path:
614 d, f = path.rsplit('/', 1)
614 d, f = path.rsplit('/', 1)
615 d = self._normalize(d, False, ignoremissing, None)
615 d = self._normalize(d, False, ignoremissing, None)
616 folded = d + "/" + f
616 folded = d + "/" + f
617 else:
617 else:
618 # No path components, preserve original case
618 # No path components, preserve original case
619 folded = path
619 folded = path
620 else:
620 else:
621 # recursively normalize leading directory components
621 # recursively normalize leading directory components
622 # against dirstate
622 # against dirstate
623 if '/' in normed:
623 if '/' in normed:
624 d, f = normed.rsplit('/', 1)
624 d, f = normed.rsplit('/', 1)
625 d = self._normalize(d, False, ignoremissing, True)
625 d = self._normalize(d, False, ignoremissing, True)
626 r = self._root + "/" + d
626 r = self._root + "/" + d
627 folded = d + "/" + util.fspath(f, r)
627 folded = d + "/" + util.fspath(f, r)
628 else:
628 else:
629 folded = util.fspath(normed, self._root)
629 folded = util.fspath(normed, self._root)
630 storemap[normed] = folded
630 storemap[normed] = folded
631
631
632 return folded
632 return folded
633
633
634 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
634 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
635 normed = util.normcase(path)
635 normed = util.normcase(path)
636 folded = self._filefoldmap.get(normed, None)
636 folded = self._filefoldmap.get(normed, None)
637 if folded is None:
637 if folded is None:
638 if isknown:
638 if isknown:
639 folded = path
639 folded = path
640 else:
640 else:
641 folded = self._discoverpath(path, normed, ignoremissing, exists,
641 folded = self._discoverpath(path, normed, ignoremissing, exists,
642 self._filefoldmap)
642 self._filefoldmap)
643 return folded
643 return folded
644
644
645 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
645 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
646 normed = util.normcase(path)
646 normed = util.normcase(path)
647 folded = self._filefoldmap.get(normed, None)
647 folded = self._filefoldmap.get(normed, None)
648 if folded is None:
648 if folded is None:
649 folded = self._dirfoldmap.get(normed, None)
649 folded = self._dirfoldmap.get(normed, None)
650 if folded is None:
650 if folded is None:
651 if isknown:
651 if isknown:
652 folded = path
652 folded = path
653 else:
653 else:
654 # store discovered result in dirfoldmap so that future
654 # store discovered result in dirfoldmap so that future
655 # normalizefile calls don't start matching directories
655 # normalizefile calls don't start matching directories
656 folded = self._discoverpath(path, normed, ignoremissing, exists,
656 folded = self._discoverpath(path, normed, ignoremissing, exists,
657 self._dirfoldmap)
657 self._dirfoldmap)
658 return folded
658 return folded
659
659
660 def normalize(self, path, isknown=False, ignoremissing=False):
660 def normalize(self, path, isknown=False, ignoremissing=False):
661 '''
661 '''
662 normalize the case of a pathname when on a casefolding filesystem
662 normalize the case of a pathname when on a casefolding filesystem
663
663
664 isknown specifies whether the filename came from walking the
664 isknown specifies whether the filename came from walking the
665 disk, to avoid extra filesystem access.
665 disk, to avoid extra filesystem access.
666
666
667 If ignoremissing is True, missing path are returned
667 If ignoremissing is True, missing path are returned
668 unchanged. Otherwise, we try harder to normalize possibly
668 unchanged. Otherwise, we try harder to normalize possibly
669 existing path components.
669 existing path components.
670
670
671 The normalized case is determined based on the following precedence:
671 The normalized case is determined based on the following precedence:
672
672
673 - version of name already stored in the dirstate
673 - version of name already stored in the dirstate
674 - version of name stored on disk
674 - version of name stored on disk
675 - version provided via command arguments
675 - version provided via command arguments
676 '''
676 '''
677
677
678 if self._checkcase:
678 if self._checkcase:
679 return self._normalize(path, isknown, ignoremissing)
679 return self._normalize(path, isknown, ignoremissing)
680 return path
680 return path
681
681
682 def clear(self):
682 def clear(self):
683 self._map = {}
683 self._map = {}
684 self._nonnormalset = set()
684 self._nonnormalset = set()
685 self._otherparentset = set()
685 self._otherparentset = set()
686 if "_dirs" in self.__dict__:
686 if "_dirs" in self.__dict__:
687 delattr(self, "_dirs")
687 delattr(self, "_dirs")
688 self._copymap = {}
688 self._copymap = {}
689 self._pl = [nullid, nullid]
689 self._pl = [nullid, nullid]
690 self._lastnormaltime = 0
690 self._lastnormaltime = 0
691 self._updatedfiles.clear()
691 self._updatedfiles.clear()
692 self._dirty = True
692 self._dirty = True
693
693
694 def rebuild(self, parent, allfiles, changedfiles=None):
694 def rebuild(self, parent, allfiles, changedfiles=None):
695 if changedfiles is None:
695 if changedfiles is None:
696 # Rebuild entire dirstate
696 # Rebuild entire dirstate
697 changedfiles = allfiles
697 changedfiles = allfiles
698 lastnormaltime = self._lastnormaltime
698 lastnormaltime = self._lastnormaltime
699 self.clear()
699 self.clear()
700 self._lastnormaltime = lastnormaltime
700 self._lastnormaltime = lastnormaltime
701
701
702 if self._origpl is None:
702 if self._origpl is None:
703 self._origpl = self._pl
703 self._origpl = self._pl
704 self._pl = (parent, nullid)
704 self._pl = (parent, nullid)
705 for f in changedfiles:
705 for f in changedfiles:
706 if f in allfiles:
706 if f in allfiles:
707 self.normallookup(f)
707 self.normallookup(f)
708 else:
708 else:
709 self.drop(f)
709 self.drop(f)
710
710
711 self._dirty = True
711 self._dirty = True
712
712
713 def write(self, tr):
713 def write(self, tr):
714 if not self._dirty:
714 if not self._dirty:
715 return
715 return
716
716
717 filename = self._filename
717 filename = self._filename
718 if tr:
718 if tr:
719 # 'dirstate.write()' is not only for writing in-memory
719 # 'dirstate.write()' is not only for writing in-memory
720 # changes out, but also for dropping ambiguous timestamp.
720 # changes out, but also for dropping ambiguous timestamp.
721 # delayed writing re-raise "ambiguous timestamp issue".
721 # delayed writing re-raise "ambiguous timestamp issue".
722 # See also the wiki page below for detail:
722 # See also the wiki page below for detail:
723 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
723 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
724
724
725 # emulate dropping timestamp in 'parsers.pack_dirstate'
725 # emulate dropping timestamp in 'parsers.pack_dirstate'
726 now = _getfsnow(self._opener)
726 now = _getfsnow(self._opener)
727 dmap = self._map
727 dmap = self._map
728 for f in self._updatedfiles:
728 for f in self._updatedfiles:
729 e = dmap.get(f)
729 e = dmap.get(f)
730 if e is not None and e[0] == 'n' and e[3] == now:
730 if e is not None and e[0] == 'n' and e[3] == now:
731 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
731 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
732 self._nonnormalset.add(f)
732 self._nonnormalset.add(f)
733
733
734 # emulate that all 'dirstate.normal' results are written out
734 # emulate that all 'dirstate.normal' results are written out
735 self._lastnormaltime = 0
735 self._lastnormaltime = 0
736 self._updatedfiles.clear()
736 self._updatedfiles.clear()
737
737
738 # delay writing in-memory changes out
738 # delay writing in-memory changes out
739 tr.addfilegenerator('dirstate', (self._filename,),
739 tr.addfilegenerator('dirstate', (self._filename,),
740 self._writedirstate, location='plain')
740 self._writedirstate, location='plain')
741 return
741 return
742
742
743 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
743 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
744 self._writedirstate(st)
744 self._writedirstate(st)
745
745
746 def addparentchangecallback(self, category, callback):
746 def addparentchangecallback(self, category, callback):
747 """add a callback to be called when the wd parents are changed
747 """add a callback to be called when the wd parents are changed
748
748
749 Callback will be called with the following arguments:
749 Callback will be called with the following arguments:
750 dirstate, (oldp1, oldp2), (newp1, newp2)
750 dirstate, (oldp1, oldp2), (newp1, newp2)
751
751
752 Category is a unique identifier to allow overwriting an old callback
752 Category is a unique identifier to allow overwriting an old callback
753 with a newer callback.
753 with a newer callback.
754 """
754 """
755 self._plchangecallbacks[category] = callback
755 self._plchangecallbacks[category] = callback
756
756
757 def _writedirstate(self, st):
757 def _writedirstate(self, st):
758 # notify callbacks about parents change
758 # notify callbacks about parents change
759 if self._origpl is not None and self._origpl != self._pl:
759 if self._origpl is not None and self._origpl != self._pl:
760 for c, callback in sorted(self._plchangecallbacks.iteritems()):
760 for c, callback in sorted(self._plchangecallbacks.iteritems()):
761 callback(self, self._origpl, self._pl)
761 callback(self, self._origpl, self._pl)
762 self._origpl = None
762 self._origpl = None
763 # use the modification time of the newly created temporary file as the
763 # use the modification time of the newly created temporary file as the
764 # filesystem's notion of 'now'
764 # filesystem's notion of 'now'
765 now = util.fstat(st).st_mtime & _rangemask
765 now = util.fstat(st).st_mtime & _rangemask
766
766
767 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
767 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
768 # timestamp of each entries in dirstate, because of 'now > mtime'
768 # timestamp of each entries in dirstate, because of 'now > mtime'
769 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
769 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
770 if delaywrite > 0:
770 if delaywrite > 0:
771 # do we have any files to delay for?
771 # do we have any files to delay for?
772 for f, e in self._map.iteritems():
772 for f, e in self._map.iteritems():
773 if e[0] == 'n' and e[3] == now:
773 if e[0] == 'n' and e[3] == now:
774 import time # to avoid useless import
774 import time # to avoid useless import
775 # rather than sleep n seconds, sleep until the next
775 # rather than sleep n seconds, sleep until the next
776 # multiple of n seconds
776 # multiple of n seconds
777 clock = time.time()
777 clock = time.time()
778 start = int(clock) - (int(clock) % delaywrite)
778 start = int(clock) - (int(clock) % delaywrite)
779 end = start + delaywrite
779 end = start + delaywrite
780 time.sleep(end - clock)
780 time.sleep(end - clock)
781 now = end # trust our estimate that the end is near now
781 now = end # trust our estimate that the end is near now
782 break
782 break
783
783
784 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
784 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
785 self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
785 self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
786 st.close()
786 st.close()
787 self._lastnormaltime = 0
787 self._lastnormaltime = 0
788 self._dirty = self._dirtypl = False
788 self._dirty = self._dirtypl = False
789
789
790 def _dirignore(self, f):
790 def _dirignore(self, f):
791 if f == '.':
791 if f == '.':
792 return False
792 return False
793 if self._ignore(f):
793 if self._ignore(f):
794 return True
794 return True
795 for p in util.finddirs(f):
795 for p in util.finddirs(f):
796 if self._ignore(p):
796 if self._ignore(p):
797 return True
797 return True
798 return False
798 return False
799
799
800 def _ignorefiles(self):
800 def _ignorefiles(self):
801 files = []
801 files = []
802 if os.path.exists(self._join('.hgignore')):
802 if os.path.exists(self._join('.hgignore')):
803 files.append(self._join('.hgignore'))
803 files.append(self._join('.hgignore'))
804 for name, path in self._ui.configitems("ui"):
804 for name, path in self._ui.configitems("ui"):
805 if name == 'ignore' or name.startswith('ignore.'):
805 if name == 'ignore' or name.startswith('ignore.'):
806 # we need to use os.path.join here rather than self._join
806 # we need to use os.path.join here rather than self._join
807 # because path is arbitrary and user-specified
807 # because path is arbitrary and user-specified
808 files.append(os.path.join(self._rootdir, util.expandpath(path)))
808 files.append(os.path.join(self._rootdir, util.expandpath(path)))
809 return files
809 return files
810
810
811 def _ignorefileandline(self, f):
811 def _ignorefileandline(self, f):
812 files = collections.deque(self._ignorefiles())
812 files = collections.deque(self._ignorefiles())
813 visited = set()
813 visited = set()
814 while files:
814 while files:
815 i = files.popleft()
815 i = files.popleft()
816 patterns = matchmod.readpatternfile(i, self._ui.warn,
816 patterns = matchmod.readpatternfile(i, self._ui.warn,
817 sourceinfo=True)
817 sourceinfo=True)
818 for pattern, lineno, line in patterns:
818 for pattern, lineno, line in patterns:
819 kind, p = matchmod._patsplit(pattern, 'glob')
819 kind, p = matchmod._patsplit(pattern, 'glob')
820 if kind == "subinclude":
820 if kind == "subinclude":
821 if p not in visited:
821 if p not in visited:
822 files.append(p)
822 files.append(p)
823 continue
823 continue
824 m = matchmod.match(self._root, '', [], [pattern],
824 m = matchmod.match(self._root, '', [], [pattern],
825 warn=self._ui.warn)
825 warn=self._ui.warn)
826 if m(f):
826 if m(f):
827 return (i, lineno, line)
827 return (i, lineno, line)
828 visited.add(i)
828 visited.add(i)
829 return (None, -1, "")
829 return (None, -1, "")
830
830
831 def _walkexplicit(self, match, subrepos):
831 def _walkexplicit(self, match, subrepos):
832 '''Get stat data about the files explicitly specified by match.
832 '''Get stat data about the files explicitly specified by match.
833
833
834 Return a triple (results, dirsfound, dirsnotfound).
834 Return a triple (results, dirsfound, dirsnotfound).
835 - results is a mapping from filename to stat result. It also contains
835 - results is a mapping from filename to stat result. It also contains
836 listings mapping subrepos and .hg to None.
836 listings mapping subrepos and .hg to None.
837 - dirsfound is a list of files found to be directories.
837 - dirsfound is a list of files found to be directories.
838 - dirsnotfound is a list of files that the dirstate thinks are
838 - dirsnotfound is a list of files that the dirstate thinks are
839 directories and that were not found.'''
839 directories and that were not found.'''
840
840
841 def badtype(mode):
841 def badtype(mode):
842 kind = _('unknown')
842 kind = _('unknown')
843 if stat.S_ISCHR(mode):
843 if stat.S_ISCHR(mode):
844 kind = _('character device')
844 kind = _('character device')
845 elif stat.S_ISBLK(mode):
845 elif stat.S_ISBLK(mode):
846 kind = _('block device')
846 kind = _('block device')
847 elif stat.S_ISFIFO(mode):
847 elif stat.S_ISFIFO(mode):
848 kind = _('fifo')
848 kind = _('fifo')
849 elif stat.S_ISSOCK(mode):
849 elif stat.S_ISSOCK(mode):
850 kind = _('socket')
850 kind = _('socket')
851 elif stat.S_ISDIR(mode):
851 elif stat.S_ISDIR(mode):
852 kind = _('directory')
852 kind = _('directory')
853 return _('unsupported file type (type is %s)') % kind
853 return _('unsupported file type (type is %s)') % kind
854
854
855 matchedir = match.explicitdir
855 matchedir = match.explicitdir
856 badfn = match.bad
856 badfn = match.bad
857 dmap = self._map
857 dmap = self._map
858 lstat = os.lstat
858 lstat = os.lstat
859 getkind = stat.S_IFMT
859 getkind = stat.S_IFMT
860 dirkind = stat.S_IFDIR
860 dirkind = stat.S_IFDIR
861 regkind = stat.S_IFREG
861 regkind = stat.S_IFREG
862 lnkkind = stat.S_IFLNK
862 lnkkind = stat.S_IFLNK
863 join = self._join
863 join = self._join
864 dirsfound = []
864 dirsfound = []
865 foundadd = dirsfound.append
865 foundadd = dirsfound.append
866 dirsnotfound = []
866 dirsnotfound = []
867 notfoundadd = dirsnotfound.append
867 notfoundadd = dirsnotfound.append
868
868
869 if not match.isexact() and self._checkcase:
869 if not match.isexact() and self._checkcase:
870 normalize = self._normalize
870 normalize = self._normalize
871 else:
871 else:
872 normalize = None
872 normalize = None
873
873
874 files = sorted(match.files())
874 files = sorted(match.files())
875 subrepos.sort()
875 subrepos.sort()
876 i, j = 0, 0
876 i, j = 0, 0
877 while i < len(files) and j < len(subrepos):
877 while i < len(files) and j < len(subrepos):
878 subpath = subrepos[j] + "/"
878 subpath = subrepos[j] + "/"
879 if files[i] < subpath:
879 if files[i] < subpath:
880 i += 1
880 i += 1
881 continue
881 continue
882 while i < len(files) and files[i].startswith(subpath):
882 while i < len(files) and files[i].startswith(subpath):
883 del files[i]
883 del files[i]
884 j += 1
884 j += 1
885
885
886 if not files or '.' in files:
886 if not files or '.' in files:
887 files = ['.']
887 files = ['.']
888 results = dict.fromkeys(subrepos)
888 results = dict.fromkeys(subrepos)
889 results['.hg'] = None
889 results['.hg'] = None
890
890
891 alldirs = None
891 alldirs = None
892 for ff in files:
892 for ff in files:
893 # constructing the foldmap is expensive, so don't do it for the
893 # constructing the foldmap is expensive, so don't do it for the
894 # common case where files is ['.']
894 # common case where files is ['.']
895 if normalize and ff != '.':
895 if normalize and ff != '.':
896 nf = normalize(ff, False, True)
896 nf = normalize(ff, False, True)
897 else:
897 else:
898 nf = ff
898 nf = ff
899 if nf in results:
899 if nf in results:
900 continue
900 continue
901
901
902 try:
902 try:
903 st = lstat(join(nf))
903 st = lstat(join(nf))
904 kind = getkind(st.st_mode)
904 kind = getkind(st.st_mode)
905 if kind == dirkind:
905 if kind == dirkind:
906 if nf in dmap:
906 if nf in dmap:
907 # file replaced by dir on disk but still in dirstate
907 # file replaced by dir on disk but still in dirstate
908 results[nf] = None
908 results[nf] = None
909 if matchedir:
909 if matchedir:
910 matchedir(nf)
910 matchedir(nf)
911 foundadd((nf, ff))
911 foundadd((nf, ff))
912 elif kind == regkind or kind == lnkkind:
912 elif kind == regkind or kind == lnkkind:
913 results[nf] = st
913 results[nf] = st
914 else:
914 else:
915 badfn(ff, badtype(kind))
915 badfn(ff, badtype(kind))
916 if nf in dmap:
916 if nf in dmap:
917 results[nf] = None
917 results[nf] = None
918 except OSError as inst: # nf not found on disk - it is dirstate only
918 except OSError as inst: # nf not found on disk - it is dirstate only
919 if nf in dmap: # does it exactly match a missing file?
919 if nf in dmap: # does it exactly match a missing file?
920 results[nf] = None
920 results[nf] = None
921 else: # does it match a missing directory?
921 else: # does it match a missing directory?
922 if alldirs is None:
922 if alldirs is None:
923 alldirs = util.dirs(dmap)
923 alldirs = util.dirs(dmap)
924 if nf in alldirs:
924 if nf in alldirs:
925 if matchedir:
925 if matchedir:
926 matchedir(nf)
926 matchedir(nf)
927 notfoundadd(nf)
927 notfoundadd(nf)
928 else:
928 else:
929 badfn(ff, inst.strerror)
929 badfn(ff, inst.strerror)
930
930
931 # Case insensitive filesystems cannot rely on lstat() failing to detect
931 # Case insensitive filesystems cannot rely on lstat() failing to detect
932 # a case-only rename. Prune the stat object for any file that does not
932 # a case-only rename. Prune the stat object for any file that does not
933 # match the case in the filesystem, if there are multiple files that
933 # match the case in the filesystem, if there are multiple files that
934 # normalize to the same path.
934 # normalize to the same path.
935 if match.isexact() and self._checkcase:
935 if match.isexact() and self._checkcase:
936 normed = {}
936 normed = {}
937
937
938 for f, st in results.iteritems():
938 for f, st in results.iteritems():
939 if st is None:
939 if st is None:
940 continue
940 continue
941
941
942 nc = util.normcase(f)
942 nc = util.normcase(f)
943 paths = normed.get(nc)
943 paths = normed.get(nc)
944
944
945 if paths is None:
945 if paths is None:
946 paths = set()
946 paths = set()
947 normed[nc] = paths
947 normed[nc] = paths
948
948
949 paths.add(f)
949 paths.add(f)
950
950
951 for norm, paths in normed.iteritems():
951 for norm, paths in normed.iteritems():
952 if len(paths) > 1:
952 if len(paths) > 1:
953 for path in paths:
953 for path in paths:
954 folded = self._discoverpath(path, norm, True, None,
954 folded = self._discoverpath(path, norm, True, None,
955 self._dirfoldmap)
955 self._dirfoldmap)
956 if path != folded:
956 if path != folded:
957 results[path] = None
957 results[path] = None
958
958
959 return results, dirsfound, dirsnotfound
959 return results, dirsfound, dirsnotfound
960
960
961 def walk(self, match, subrepos, unknown, ignored, full=True):
961 def walk(self, match, subrepos, unknown, ignored, full=True):
962 '''
962 '''
963 Walk recursively through the directory tree, finding all files
963 Walk recursively through the directory tree, finding all files
964 matched by match.
964 matched by match.
965
965
966 If full is False, maybe skip some known-clean files.
966 If full is False, maybe skip some known-clean files.
967
967
968 Return a dict mapping filename to stat-like object (either
968 Return a dict mapping filename to stat-like object (either
969 mercurial.osutil.stat instance or return value of os.stat()).
969 mercurial.osutil.stat instance or return value of os.stat()).
970
970
971 '''
971 '''
972 # full is a flag that extensions that hook into walk can use -- this
972 # full is a flag that extensions that hook into walk can use -- this
973 # implementation doesn't use it at all. This satisfies the contract
973 # implementation doesn't use it at all. This satisfies the contract
974 # because we only guarantee a "maybe".
974 # because we only guarantee a "maybe".
975
975
976 if ignored:
976 if ignored:
977 ignore = util.never
977 ignore = util.never
978 dirignore = util.never
978 dirignore = util.never
979 elif unknown:
979 elif unknown:
980 ignore = self._ignore
980 ignore = self._ignore
981 dirignore = self._dirignore
981 dirignore = self._dirignore
982 else:
982 else:
983 # if not unknown and not ignored, drop dir recursion and step 2
983 # if not unknown and not ignored, drop dir recursion and step 2
984 ignore = util.always
984 ignore = util.always
985 dirignore = util.always
985 dirignore = util.always
986
986
987 matchfn = match.matchfn
987 matchfn = match.matchfn
988 matchalways = match.always()
988 matchalways = match.always()
989 matchtdir = match.traversedir
989 matchtdir = match.traversedir
990 dmap = self._map
990 dmap = self._map
991 listdir = osutil.listdir
991 listdir = osutil.listdir
992 lstat = os.lstat
992 lstat = os.lstat
993 dirkind = stat.S_IFDIR
993 dirkind = stat.S_IFDIR
994 regkind = stat.S_IFREG
994 regkind = stat.S_IFREG
995 lnkkind = stat.S_IFLNK
995 lnkkind = stat.S_IFLNK
996 join = self._join
996 join = self._join
997
997
998 exact = skipstep3 = False
998 exact = skipstep3 = False
999 if match.isexact(): # match.exact
999 if match.isexact(): # match.exact
1000 exact = True
1000 exact = True
1001 dirignore = util.always # skip step 2
1001 dirignore = util.always # skip step 2
1002 elif match.prefix(): # match.match, no patterns
1002 elif match.prefix(): # match.match, no patterns
1003 skipstep3 = True
1003 skipstep3 = True
1004
1004
1005 if not exact and self._checkcase:
1005 if not exact and self._checkcase:
1006 normalize = self._normalize
1006 normalize = self._normalize
1007 normalizefile = self._normalizefile
1007 normalizefile = self._normalizefile
1008 skipstep3 = False
1008 skipstep3 = False
1009 else:
1009 else:
1010 normalize = self._normalize
1010 normalize = self._normalize
1011 normalizefile = None
1011 normalizefile = None
1012
1012
1013 # step 1: find all explicit files
1013 # step 1: find all explicit files
1014 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1014 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1015
1015
1016 skipstep3 = skipstep3 and not (work or dirsnotfound)
1016 skipstep3 = skipstep3 and not (work or dirsnotfound)
1017 work = [d for d in work if not dirignore(d[0])]
1017 work = [d for d in work if not dirignore(d[0])]
1018
1018
1019 # step 2: visit subdirectories
1019 # step 2: visit subdirectories
1020 def traverse(work, alreadynormed):
1020 def traverse(work, alreadynormed):
1021 wadd = work.append
1021 wadd = work.append
1022 while work:
1022 while work:
1023 nd = work.pop()
1023 nd = work.pop()
1024 skip = None
1024 skip = None
1025 if nd == '.':
1025 if nd == '.':
1026 nd = ''
1026 nd = ''
1027 else:
1027 else:
1028 skip = '.hg'
1028 skip = '.hg'
1029 try:
1029 try:
1030 entries = listdir(join(nd), stat=True, skip=skip)
1030 entries = listdir(join(nd), stat=True, skip=skip)
1031 except OSError as inst:
1031 except OSError as inst:
1032 if inst.errno in (errno.EACCES, errno.ENOENT):
1032 if inst.errno in (errno.EACCES, errno.ENOENT):
1033 match.bad(self.pathto(nd), inst.strerror)
1033 match.bad(self.pathto(nd), inst.strerror)
1034 continue
1034 continue
1035 raise
1035 raise
1036 for f, kind, st in entries:
1036 for f, kind, st in entries:
1037 if normalizefile:
1037 if normalizefile:
1038 # even though f might be a directory, we're only
1038 # even though f might be a directory, we're only
1039 # interested in comparing it to files currently in the
1039 # interested in comparing it to files currently in the
1040 # dmap -- therefore normalizefile is enough
1040 # dmap -- therefore normalizefile is enough
1041 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1041 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1042 True)
1042 True)
1043 else:
1043 else:
1044 nf = nd and (nd + "/" + f) or f
1044 nf = nd and (nd + "/" + f) or f
1045 if nf not in results:
1045 if nf not in results:
1046 if kind == dirkind:
1046 if kind == dirkind:
1047 if not ignore(nf):
1047 if not ignore(nf):
1048 if matchtdir:
1048 if matchtdir:
1049 matchtdir(nf)
1049 matchtdir(nf)
1050 wadd(nf)
1050 wadd(nf)
1051 if nf in dmap and (matchalways or matchfn(nf)):
1051 if nf in dmap and (matchalways or matchfn(nf)):
1052 results[nf] = None
1052 results[nf] = None
1053 elif kind == regkind or kind == lnkkind:
1053 elif kind == regkind or kind == lnkkind:
1054 if nf in dmap:
1054 if nf in dmap:
1055 if matchalways or matchfn(nf):
1055 if matchalways or matchfn(nf):
1056 results[nf] = st
1056 results[nf] = st
1057 elif ((matchalways or matchfn(nf))
1057 elif ((matchalways or matchfn(nf))
1058 and not ignore(nf)):
1058 and not ignore(nf)):
1059 # unknown file -- normalize if necessary
1059 # unknown file -- normalize if necessary
1060 if not alreadynormed:
1060 if not alreadynormed:
1061 nf = normalize(nf, False, True)
1061 nf = normalize(nf, False, True)
1062 results[nf] = st
1062 results[nf] = st
1063 elif nf in dmap and (matchalways or matchfn(nf)):
1063 elif nf in dmap and (matchalways or matchfn(nf)):
1064 results[nf] = None
1064 results[nf] = None
1065
1065
1066 for nd, d in work:
1066 for nd, d in work:
1067 # alreadynormed means that processwork doesn't have to do any
1067 # alreadynormed means that processwork doesn't have to do any
1068 # expensive directory normalization
1068 # expensive directory normalization
1069 alreadynormed = not normalize or nd == d
1069 alreadynormed = not normalize or nd == d
1070 traverse([d], alreadynormed)
1070 traverse([d], alreadynormed)
1071
1071
1072 for s in subrepos:
1072 for s in subrepos:
1073 del results[s]
1073 del results[s]
1074 del results['.hg']
1074 del results['.hg']
1075
1075
1076 # step 3: visit remaining files from dmap
1076 # step 3: visit remaining files from dmap
1077 if not skipstep3 and not exact:
1077 if not skipstep3 and not exact:
1078 # If a dmap file is not in results yet, it was either
1078 # If a dmap file is not in results yet, it was either
1079 # a) not matching matchfn b) ignored, c) missing, or d) under a
1079 # a) not matching matchfn b) ignored, c) missing, or d) under a
1080 # symlink directory.
1080 # symlink directory.
1081 if not results and matchalways:
1081 if not results and matchalways:
1082 visit = [f for f in dmap]
1082 visit = [f for f in dmap]
1083 else:
1083 else:
1084 visit = [f for f in dmap if f not in results and matchfn(f)]
1084 visit = [f for f in dmap if f not in results and matchfn(f)]
1085 visit.sort()
1085 visit.sort()
1086
1086
1087 if unknown:
1087 if unknown:
1088 # unknown == True means we walked all dirs under the roots
1088 # unknown == True means we walked all dirs under the roots
1089 # that wasn't ignored, and everything that matched was stat'ed
1089 # that wasn't ignored, and everything that matched was stat'ed
1090 # and is already in results.
1090 # and is already in results.
1091 # The rest must thus be ignored or under a symlink.
1091 # The rest must thus be ignored or under a symlink.
1092 audit_path = pathutil.pathauditor(self._root)
1092 audit_path = pathutil.pathauditor(self._root)
1093
1093
1094 for nf in iter(visit):
1094 for nf in iter(visit):
1095 # If a stat for the same file was already added with a
1095 # If a stat for the same file was already added with a
1096 # different case, don't add one for this, since that would
1096 # different case, don't add one for this, since that would
1097 # make it appear as if the file exists under both names
1097 # make it appear as if the file exists under both names
1098 # on disk.
1098 # on disk.
1099 if (normalizefile and
1099 if (normalizefile and
1100 normalizefile(nf, True, True) in results):
1100 normalizefile(nf, True, True) in results):
1101 results[nf] = None
1101 results[nf] = None
1102 # Report ignored items in the dmap as long as they are not
1102 # Report ignored items in the dmap as long as they are not
1103 # under a symlink directory.
1103 # under a symlink directory.
1104 elif audit_path.check(nf):
1104 elif audit_path.check(nf):
1105 try:
1105 try:
1106 results[nf] = lstat(join(nf))
1106 results[nf] = lstat(join(nf))
1107 # file was just ignored, no links, and exists
1107 # file was just ignored, no links, and exists
1108 except OSError:
1108 except OSError:
1109 # file doesn't exist
1109 # file doesn't exist
1110 results[nf] = None
1110 results[nf] = None
1111 else:
1111 else:
1112 # It's either missing or under a symlink directory
1112 # It's either missing or under a symlink directory
1113 # which we in this case report as missing
1113 # which we in this case report as missing
1114 results[nf] = None
1114 results[nf] = None
1115 else:
1115 else:
1116 # We may not have walked the full directory tree above,
1116 # We may not have walked the full directory tree above,
1117 # so stat and check everything we missed.
1117 # so stat and check everything we missed.
1118 iv = iter(visit)
1118 iv = iter(visit)
1119 for st in util.statfiles([join(i) for i in visit]):
1119 for st in util.statfiles([join(i) for i in visit]):
1120 results[next(iv)] = st
1120 results[next(iv)] = st
1121 return results
1121 return results
1122
1122
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        # Keep the caller's flags under distinct names; the local lists below
        # deliberately reuse the names 'unknown'/'ignored'/'clean' as result
        # accumulators.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        # Bind frequently-used attributes and bound methods to locals: this
        # loop runs once per file in the working copy, so attribute-lookup
        # cost matters.
        dmap = self._map
        ladd = lookup.append            # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                # File on disk but not tracked: classify as ignored or
                # unknown.
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            # st is None when walk() found no stat result for the file.
            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                # Tracked-and-normal: compare recorded size/mode/mtime with
                # the on-disk stat to decide modified / unsure / clean.
                # (_rangemask comparisons handle values truncated to 31 bits
                # in the dirstate file.)
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                # merge state -- report as modified
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))
1214
1214
1215 def matches(self, match):
1215 def matches(self, match):
1216 '''
1216 '''
1217 return files in the dirstate (in whatever state) filtered by match
1217 return files in the dirstate (in whatever state) filtered by match
1218 '''
1218 '''
1219 dmap = self._map
1219 dmap = self._map
1220 if match.always():
1220 if match.always():
1221 return dmap.keys()
1221 return dmap.keys()
1222 files = match.files()
1222 files = match.files()
1223 if match.isexact():
1223 if match.isexact():
1224 # fast path -- filter the other way around, since typically files is
1224 # fast path -- filter the other way around, since typically files is
1225 # much smaller than dmap
1225 # much smaller than dmap
1226 return [f for f in files if f in dmap]
1226 return [f for f in files if f in dmap]
1227 if match.prefix() and all(fn in dmap for fn in files):
1227 if match.prefix() and all(fn in dmap for fn in files):
1228 # fast path -- all the values are known to be files, so just return
1228 # fast path -- all the values are known to be files, so just return
1229 # that
1229 # that
1230 return list(files)
1230 return list(files)
1231 return [f for f in dmap if match(f)]
1231 return [f for f in dmap if match(f)]
1232
1232
1233 def _actualfilename(self, tr):
1233 def _actualfilename(self, tr):
1234 if tr:
1234 if tr:
1235 return self._pendingfilename
1235 return self._pendingfilename
1236 else:
1236 else:
1237 return self._filename
1237 return self._filename
1238
1238
    def savebackup(self, tr, suffix='', prefix=''):
        '''Save current dirstate into backup file with suffix'''
        # A backup name must differ from the dirstate file itself, so at
        # least one of prefix/suffix has to be non-empty.
        assert len(suffix) > 0 or len(prefix) > 0
        filename = self._actualfilename(tr)

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(self._opener(filename, "w", atomictemp=True,
                                             checkambig=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        backupname = prefix + self._filename + suffix
        assert backupname != filename
        # drop any stale backup first; tryunlink presumably tolerates the
        # file being absent (unlike a bare unlink) -- that is the point of
        # this change from the previous exists()+unlink() pair
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(self._opener.join(filename),
                      self._opener.join(backupname), hardlink=True)
1271
1270
    def restorebackup(self, tr, suffix='', prefix=''):
        '''Restore dirstate by backup file with suffix'''
        # The backup name must be distinguishable from the dirstate file.
        assert len(suffix) > 0 or len(prefix) > 0
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        # using self._filename to avoid having "pending" in the backup filename
        self._opener.rename(prefix + self._filename + suffix, filename,
                            checkambig=True)
1280 checkambig=True)
1282
1281
1283 def clearbackup(self, tr, suffix='', prefix=''):
1282 def clearbackup(self, tr, suffix='', prefix=''):
1284 '''Clear backup file with suffix'''
1283 '''Clear backup file with suffix'''
1285 assert len(suffix) > 0 or len(prefix) > 0
1284 assert len(suffix) > 0 or len(prefix) > 0
1286 # using self._filename to avoid having "pending" in the backup filename
1285 # using self._filename to avoid having "pending" in the backup filename
1287 self._opener.unlink(prefix + self._filename + suffix)
1286 self._opener.unlink(prefix + self._filename + suffix)
General Comments 0
You need to be logged in to leave comments. Login now