dirstate: add callback to notify extensions about wd parent change...
Mateusz Kwapich
r29772:2ebd507e default
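This changeset adds dirstate.addparentchangecallback(category, callback): callbacks registered under a category are invoked from _writedirstate() with (dirstate, (oldp1, oldp2), (newp1, newp2)) whenever the working directory parents recorded when the dirstate was read (_origpl) differ from the ones being written out. A minimal, hypothetical sketch of how an extension might use the new hook follows; the extension name 'myext' and the reposetup() wiring are illustrative assumptions, not part of the patch.

    from mercurial import node

    def reposetup(ui, repo):
        if not repo.local():
            return

        def onparentchange(dirstate, oldparents, newparents):
            # called from dirstate._writedirstate() only when the working
            # directory parents actually changed since the dirstate was read
            ui.debug('wd parents: %s -> %s\n'
                     % (node.short(oldparents[0]), node.short(newparents[0])))

        # 'myext' is the category key: registering again under the same
        # category replaces the earlier callback instead of adding another
        repo.dirstate.addparentchangecallback('myext', onparentchange)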
@@ -1,1241 +1,1264 @@
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import errno
11 import errno
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import nullid
16 from .node import nullid
17 from . import (
17 from . import (
18 encoding,
18 encoding,
19 error,
19 error,
20 match as matchmod,
20 match as matchmod,
21 osutil,
21 osutil,
22 parsers,
22 parsers,
23 pathutil,
23 pathutil,
24 scmutil,
24 scmutil,
25 util,
25 util,
26 )
26 )
27
27
28 propertycache = util.propertycache
28 propertycache = util.propertycache
29 filecache = scmutil.filecache
29 filecache = scmutil.filecache
30 _rangemask = 0x7fffffff
30 _rangemask = 0x7fffffff
31
31
32 dirstatetuple = parsers.dirstatetuple
32 dirstatetuple = parsers.dirstatetuple
33
33
34 class repocache(filecache):
34 class repocache(filecache):
35 """filecache for files in .hg/"""
35 """filecache for files in .hg/"""
36 def join(self, obj, fname):
36 def join(self, obj, fname):
37 return obj._opener.join(fname)
37 return obj._opener.join(fname)
38
38
39 class rootcache(filecache):
39 class rootcache(filecache):
40 """filecache for files in the repository root"""
40 """filecache for files in the repository root"""
41 def join(self, obj, fname):
41 def join(self, obj, fname):
42 return obj._join(fname)
42 return obj._join(fname)
43
43
44 def _getfsnow(vfs):
44 def _getfsnow(vfs):
45 '''Get "now" timestamp on filesystem'''
45 '''Get "now" timestamp on filesystem'''
46 tmpfd, tmpname = vfs.mkstemp()
46 tmpfd, tmpname = vfs.mkstemp()
47 try:
47 try:
48 return os.fstat(tmpfd).st_mtime
48 return os.fstat(tmpfd).st_mtime
49 finally:
49 finally:
50 os.close(tmpfd)
50 os.close(tmpfd)
51 vfs.unlink(tmpname)
51 vfs.unlink(tmpname)
52
52
53 def nonnormalentries(dmap):
53 def nonnormalentries(dmap):
54 '''Compute the nonnormal dirstate entries from the dmap'''
54 '''Compute the nonnormal dirstate entries from the dmap'''
55 try:
55 try:
56 return parsers.nonnormalentries(dmap)
56 return parsers.nonnormalentries(dmap)
57 except AttributeError:
57 except AttributeError:
58 return set(fname for fname, e in dmap.iteritems()
58 return set(fname for fname, e in dmap.iteritems()
59 if e[0] != 'n' or e[3] == -1)
59 if e[0] != 'n' or e[3] == -1)
60
60
61 def _trypending(root, vfs, filename):
61 def _trypending(root, vfs, filename):
62 '''Open file to be read according to HG_PENDING environment variable
62 '''Open file to be read according to HG_PENDING environment variable
63
63
64 This opens '.pending' of specified 'filename' only when HG_PENDING
64 This opens '.pending' of specified 'filename' only when HG_PENDING
65 is equal to 'root'.
65 is equal to 'root'.
66
66
67 This returns '(fp, is_pending_opened)' tuple.
67 This returns '(fp, is_pending_opened)' tuple.
68 '''
68 '''
69 if root == os.environ.get('HG_PENDING'):
69 if root == os.environ.get('HG_PENDING'):
70 try:
70 try:
71 return (vfs('%s.pending' % filename), True)
71 return (vfs('%s.pending' % filename), True)
72 except IOError as inst:
72 except IOError as inst:
73 if inst.errno != errno.ENOENT:
73 if inst.errno != errno.ENOENT:
74 raise
74 raise
75 return (vfs(filename), False)
75 return (vfs(filename), False)
76
76
77 class dirstate(object):
77 class dirstate(object):
78
78
79 def __init__(self, opener, ui, root, validate):
79 def __init__(self, opener, ui, root, validate):
80 '''Create a new dirstate object.
80 '''Create a new dirstate object.
81
81
82 opener is an open()-like callable that can be used to open the
82 opener is an open()-like callable that can be used to open the
83 dirstate file; root is the root of the directory tracked by
83 dirstate file; root is the root of the directory tracked by
84 the dirstate.
84 the dirstate.
85 '''
85 '''
86 self._opener = opener
86 self._opener = opener
87 self._validate = validate
87 self._validate = validate
88 self._root = root
88 self._root = root
89 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
89 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
90 # UNC path pointing to root share (issue4557)
90 # UNC path pointing to root share (issue4557)
91 self._rootdir = pathutil.normasprefix(root)
91 self._rootdir = pathutil.normasprefix(root)
92 # internal config: ui.forcecwd
92 # internal config: ui.forcecwd
93 forcecwd = ui.config('ui', 'forcecwd')
93 forcecwd = ui.config('ui', 'forcecwd')
94 if forcecwd:
94 if forcecwd:
95 self._cwd = forcecwd
95 self._cwd = forcecwd
96 self._dirty = False
96 self._dirty = False
97 self._dirtypl = False
97 self._dirtypl = False
98 self._lastnormaltime = 0
98 self._lastnormaltime = 0
99 self._ui = ui
99 self._ui = ui
100 self._filecache = {}
100 self._filecache = {}
101 self._parentwriters = 0
101 self._parentwriters = 0
102 self._filename = 'dirstate'
102 self._filename = 'dirstate'
103 self._pendingfilename = '%s.pending' % self._filename
103 self._pendingfilename = '%s.pending' % self._filename
104 self._plchangecallbacks = {}
105 self._origpl = None
104
106
105 # for consistent view between _pl() and _read() invocations
107 # for consistent view between _pl() and _read() invocations
106 self._pendingmode = None
108 self._pendingmode = None
107
109
108 def beginparentchange(self):
110 def beginparentchange(self):
109 '''Marks the beginning of a set of changes that involve changing
111 '''Marks the beginning of a set of changes that involve changing
110 the dirstate parents. If there is an exception during this time,
112 the dirstate parents. If there is an exception during this time,
111 the dirstate will not be written when the wlock is released. This
113 the dirstate will not be written when the wlock is released. This
112 prevents writing an incoherent dirstate where the parent doesn't
114 prevents writing an incoherent dirstate where the parent doesn't
113 match the contents.
115 match the contents.
114 '''
116 '''
115 self._parentwriters += 1
117 self._parentwriters += 1
116
118
117 def endparentchange(self):
119 def endparentchange(self):
118 '''Marks the end of a set of changes that involve changing the
120 '''Marks the end of a set of changes that involve changing the
119 dirstate parents. Once all parent changes have been marked done,
121 dirstate parents. Once all parent changes have been marked done,
120 the wlock will be free to write the dirstate on release.
122 the wlock will be free to write the dirstate on release.
121 '''
123 '''
122 if self._parentwriters > 0:
124 if self._parentwriters > 0:
123 self._parentwriters -= 1
125 self._parentwriters -= 1
124
126
125 def pendingparentchange(self):
127 def pendingparentchange(self):
126 '''Returns true if the dirstate is in the middle of a set of changes
128 '''Returns true if the dirstate is in the middle of a set of changes
127 that modify the dirstate parent.
129 that modify the dirstate parent.
128 '''
130 '''
129 return self._parentwriters > 0
131 return self._parentwriters > 0
130
132
131 @propertycache
133 @propertycache
132 def _map(self):
134 def _map(self):
133 '''Return the dirstate contents as a map from filename to
135 '''Return the dirstate contents as a map from filename to
134 (state, mode, size, time).'''
136 (state, mode, size, time).'''
135 self._read()
137 self._read()
136 return self._map
138 return self._map
137
139
138 @propertycache
140 @propertycache
139 def _copymap(self):
141 def _copymap(self):
140 self._read()
142 self._read()
141 return self._copymap
143 return self._copymap
142
144
143 @propertycache
145 @propertycache
144 def _nonnormalset(self):
146 def _nonnormalset(self):
145 return nonnormalentries(self._map)
147 return nonnormalentries(self._map)
146
148
147 @propertycache
149 @propertycache
148 def _filefoldmap(self):
150 def _filefoldmap(self):
149 try:
151 try:
150 makefilefoldmap = parsers.make_file_foldmap
152 makefilefoldmap = parsers.make_file_foldmap
151 except AttributeError:
153 except AttributeError:
152 pass
154 pass
153 else:
155 else:
154 return makefilefoldmap(self._map, util.normcasespec,
156 return makefilefoldmap(self._map, util.normcasespec,
155 util.normcasefallback)
157 util.normcasefallback)
156
158
157 f = {}
159 f = {}
158 normcase = util.normcase
160 normcase = util.normcase
159 for name, s in self._map.iteritems():
161 for name, s in self._map.iteritems():
160 if s[0] != 'r':
162 if s[0] != 'r':
161 f[normcase(name)] = name
163 f[normcase(name)] = name
162 f['.'] = '.' # prevents useless util.fspath() invocation
164 f['.'] = '.' # prevents useless util.fspath() invocation
163 return f
165 return f
164
166
165 @propertycache
167 @propertycache
166 def _dirfoldmap(self):
168 def _dirfoldmap(self):
167 f = {}
169 f = {}
168 normcase = util.normcase
170 normcase = util.normcase
169 for name in self._dirs:
171 for name in self._dirs:
170 f[normcase(name)] = name
172 f[normcase(name)] = name
171 return f
173 return f
172
174
173 @repocache('branch')
175 @repocache('branch')
174 def _branch(self):
176 def _branch(self):
175 try:
177 try:
176 return self._opener.read("branch").strip() or "default"
178 return self._opener.read("branch").strip() or "default"
177 except IOError as inst:
179 except IOError as inst:
178 if inst.errno != errno.ENOENT:
180 if inst.errno != errno.ENOENT:
179 raise
181 raise
180 return "default"
182 return "default"
181
183
182 @propertycache
184 @propertycache
183 def _pl(self):
185 def _pl(self):
184 try:
186 try:
185 fp = self._opendirstatefile()
187 fp = self._opendirstatefile()
186 st = fp.read(40)
188 st = fp.read(40)
187 fp.close()
189 fp.close()
188 l = len(st)
190 l = len(st)
189 if l == 40:
191 if l == 40:
190 return st[:20], st[20:40]
192 return st[:20], st[20:40]
191 elif l > 0 and l < 40:
193 elif l > 0 and l < 40:
192 raise error.Abort(_('working directory state appears damaged!'))
194 raise error.Abort(_('working directory state appears damaged!'))
193 except IOError as err:
195 except IOError as err:
194 if err.errno != errno.ENOENT:
196 if err.errno != errno.ENOENT:
195 raise
197 raise
196 return [nullid, nullid]
198 return [nullid, nullid]
197
199
198 @propertycache
200 @propertycache
199 def _dirs(self):
201 def _dirs(self):
200 return util.dirs(self._map, 'r')
202 return util.dirs(self._map, 'r')
201
203
202 def dirs(self):
204 def dirs(self):
203 return self._dirs
205 return self._dirs
204
206
205 @rootcache('.hgignore')
207 @rootcache('.hgignore')
206 def _ignore(self):
208 def _ignore(self):
207 files = self._ignorefiles()
209 files = self._ignorefiles()
208 if not files:
210 if not files:
209 return util.never
211 return util.never
210
212
211 pats = ['include:%s' % f for f in files]
213 pats = ['include:%s' % f for f in files]
212 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
214 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
213
215
214 @propertycache
216 @propertycache
215 def _slash(self):
217 def _slash(self):
216 return self._ui.configbool('ui', 'slash') and os.sep != '/'
218 return self._ui.configbool('ui', 'slash') and os.sep != '/'
217
219
218 @propertycache
220 @propertycache
219 def _checklink(self):
221 def _checklink(self):
220 return util.checklink(self._root)
222 return util.checklink(self._root)
221
223
222 @propertycache
224 @propertycache
223 def _checkexec(self):
225 def _checkexec(self):
224 return util.checkexec(self._root)
226 return util.checkexec(self._root)
225
227
226 @propertycache
228 @propertycache
227 def _checkcase(self):
229 def _checkcase(self):
228 return not util.checkcase(self._join('.hg'))
230 return not util.checkcase(self._join('.hg'))
229
231
230 def _join(self, f):
232 def _join(self, f):
231 # much faster than os.path.join()
233 # much faster than os.path.join()
232 # it's safe because f is always a relative path
234 # it's safe because f is always a relative path
233 return self._rootdir + f
235 return self._rootdir + f
234
236
235 def flagfunc(self, buildfallback):
237 def flagfunc(self, buildfallback):
236 if self._checklink and self._checkexec:
238 if self._checklink and self._checkexec:
237 def f(x):
239 def f(x):
238 try:
240 try:
239 st = os.lstat(self._join(x))
241 st = os.lstat(self._join(x))
240 if util.statislink(st):
242 if util.statislink(st):
241 return 'l'
243 return 'l'
242 if util.statisexec(st):
244 if util.statisexec(st):
243 return 'x'
245 return 'x'
244 except OSError:
246 except OSError:
245 pass
247 pass
246 return ''
248 return ''
247 return f
249 return f
248
250
249 fallback = buildfallback()
251 fallback = buildfallback()
250 if self._checklink:
252 if self._checklink:
251 def f(x):
253 def f(x):
252 if os.path.islink(self._join(x)):
254 if os.path.islink(self._join(x)):
253 return 'l'
255 return 'l'
254 if 'x' in fallback(x):
256 if 'x' in fallback(x):
255 return 'x'
257 return 'x'
256 return ''
258 return ''
257 return f
259 return f
258 if self._checkexec:
260 if self._checkexec:
259 def f(x):
261 def f(x):
260 if 'l' in fallback(x):
262 if 'l' in fallback(x):
261 return 'l'
263 return 'l'
262 if util.isexec(self._join(x)):
264 if util.isexec(self._join(x)):
263 return 'x'
265 return 'x'
264 return ''
266 return ''
265 return f
267 return f
266 else:
268 else:
267 return fallback
269 return fallback
268
270
269 @propertycache
271 @propertycache
270 def _cwd(self):
272 def _cwd(self):
271 return os.getcwd()
273 return os.getcwd()
272
274
273 def getcwd(self):
275 def getcwd(self):
274 '''Return the path from which a canonical path is calculated.
276 '''Return the path from which a canonical path is calculated.
275
277
276 This path should be used to resolve file patterns or to convert
278 This path should be used to resolve file patterns or to convert
277 canonical paths back to file paths for display. It shouldn't be
279 canonical paths back to file paths for display. It shouldn't be
278 used to get real file paths. Use vfs functions instead.
280 used to get real file paths. Use vfs functions instead.
279 '''
281 '''
280 cwd = self._cwd
282 cwd = self._cwd
281 if cwd == self._root:
283 if cwd == self._root:
282 return ''
284 return ''
283 # self._root ends with a path separator if self._root is '/' or 'C:\'
285 # self._root ends with a path separator if self._root is '/' or 'C:\'
284 rootsep = self._root
286 rootsep = self._root
285 if not util.endswithsep(rootsep):
287 if not util.endswithsep(rootsep):
286 rootsep += os.sep
288 rootsep += os.sep
287 if cwd.startswith(rootsep):
289 if cwd.startswith(rootsep):
288 return cwd[len(rootsep):]
290 return cwd[len(rootsep):]
289 else:
291 else:
290 # we're outside the repo. return an absolute path.
292 # we're outside the repo. return an absolute path.
291 return cwd
293 return cwd
292
294
293 def pathto(self, f, cwd=None):
295 def pathto(self, f, cwd=None):
294 if cwd is None:
296 if cwd is None:
295 cwd = self.getcwd()
297 cwd = self.getcwd()
296 path = util.pathto(self._root, cwd, f)
298 path = util.pathto(self._root, cwd, f)
297 if self._slash:
299 if self._slash:
298 return util.pconvert(path)
300 return util.pconvert(path)
299 return path
301 return path
300
302
301 def __getitem__(self, key):
303 def __getitem__(self, key):
302 '''Return the current state of key (a filename) in the dirstate.
304 '''Return the current state of key (a filename) in the dirstate.
303
305
304 States are:
306 States are:
305 n normal
307 n normal
306 m needs merging
308 m needs merging
307 r marked for removal
309 r marked for removal
308 a marked for addition
310 a marked for addition
309 ? not tracked
311 ? not tracked
310 '''
312 '''
311 return self._map.get(key, ("?",))[0]
313 return self._map.get(key, ("?",))[0]
312
314
313 def __contains__(self, key):
315 def __contains__(self, key):
314 return key in self._map
316 return key in self._map
315
317
316 def __iter__(self):
318 def __iter__(self):
317 for x in sorted(self._map):
319 for x in sorted(self._map):
318 yield x
320 yield x
319
321
320 def iteritems(self):
322 def iteritems(self):
321 return self._map.iteritems()
323 return self._map.iteritems()
322
324
323 def parents(self):
325 def parents(self):
324 return [self._validate(p) for p in self._pl]
326 return [self._validate(p) for p in self._pl]
325
327
326 def p1(self):
328 def p1(self):
327 return self._validate(self._pl[0])
329 return self._validate(self._pl[0])
328
330
329 def p2(self):
331 def p2(self):
330 return self._validate(self._pl[1])
332 return self._validate(self._pl[1])
331
333
332 def branch(self):
334 def branch(self):
333 return encoding.tolocal(self._branch)
335 return encoding.tolocal(self._branch)
334
336
335 def setparents(self, p1, p2=nullid):
337 def setparents(self, p1, p2=nullid):
336 """Set dirstate parents to p1 and p2.
338 """Set dirstate parents to p1 and p2.
337
339
338 When moving from two parents to one, 'm' merged entries a
340 When moving from two parents to one, 'm' merged entries a
339 adjusted to normal and previous copy records discarded and
341 adjusted to normal and previous copy records discarded and
340 returned by the call.
342 returned by the call.
341
343
342 See localrepo.setparents()
344 See localrepo.setparents()
343 """
345 """
344 if self._parentwriters == 0:
346 if self._parentwriters == 0:
345 raise ValueError("cannot set dirstate parent without "
347 raise ValueError("cannot set dirstate parent without "
346 "calling dirstate.beginparentchange")
348 "calling dirstate.beginparentchange")
347
349
348 self._dirty = self._dirtypl = True
350 self._dirty = self._dirtypl = True
349 oldp2 = self._pl[1]
351 oldp2 = self._pl[1]
352 if self._origpl is None:
353 self._origpl = self._pl
350 self._pl = p1, p2
354 self._pl = p1, p2
351 copies = {}
355 copies = {}
352 if oldp2 != nullid and p2 == nullid:
356 if oldp2 != nullid and p2 == nullid:
353 for f, s in self._map.iteritems():
357 for f, s in self._map.iteritems():
354 # Discard 'm' markers when moving away from a merge state
358 # Discard 'm' markers when moving away from a merge state
355 if s[0] == 'm':
359 if s[0] == 'm':
356 if f in self._copymap:
360 if f in self._copymap:
357 copies[f] = self._copymap[f]
361 copies[f] = self._copymap[f]
358 self.normallookup(f)
362 self.normallookup(f)
359 # Also fix up otherparent markers
363 # Also fix up otherparent markers
360 elif s[0] == 'n' and s[2] == -2:
364 elif s[0] == 'n' and s[2] == -2:
361 if f in self._copymap:
365 if f in self._copymap:
362 copies[f] = self._copymap[f]
366 copies[f] = self._copymap[f]
363 self.add(f)
367 self.add(f)
364 return copies
368 return copies
365
369
366 def setbranch(self, branch):
370 def setbranch(self, branch):
367 self._branch = encoding.fromlocal(branch)
371 self._branch = encoding.fromlocal(branch)
368 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
372 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
369 try:
373 try:
370 f.write(self._branch + '\n')
374 f.write(self._branch + '\n')
371 f.close()
375 f.close()
372
376
373 # make sure filecache has the correct stat info for _branch after
377 # make sure filecache has the correct stat info for _branch after
374 # replacing the underlying file
378 # replacing the underlying file
375 ce = self._filecache['_branch']
379 ce = self._filecache['_branch']
376 if ce:
380 if ce:
377 ce.refresh()
381 ce.refresh()
378 except: # re-raises
382 except: # re-raises
379 f.discard()
383 f.discard()
380 raise
384 raise
381
385
382 def _opendirstatefile(self):
386 def _opendirstatefile(self):
383 fp, mode = _trypending(self._root, self._opener, self._filename)
387 fp, mode = _trypending(self._root, self._opener, self._filename)
384 if self._pendingmode is not None and self._pendingmode != mode:
388 if self._pendingmode is not None and self._pendingmode != mode:
385 fp.close()
389 fp.close()
386 raise error.Abort(_('working directory state may be '
390 raise error.Abort(_('working directory state may be '
387 'changed parallelly'))
391 'changed parallelly'))
388 self._pendingmode = mode
392 self._pendingmode = mode
389 return fp
393 return fp
390
394
391 def _read(self):
395 def _read(self):
392 self._map = {}
396 self._map = {}
393 self._copymap = {}
397 self._copymap = {}
394 try:
398 try:
395 fp = self._opendirstatefile()
399 fp = self._opendirstatefile()
396 try:
400 try:
397 st = fp.read()
401 st = fp.read()
398 finally:
402 finally:
399 fp.close()
403 fp.close()
400 except IOError as err:
404 except IOError as err:
401 if err.errno != errno.ENOENT:
405 if err.errno != errno.ENOENT:
402 raise
406 raise
403 return
407 return
404 if not st:
408 if not st:
405 return
409 return
406
410
407 if util.safehasattr(parsers, 'dict_new_presized'):
411 if util.safehasattr(parsers, 'dict_new_presized'):
408 # Make an estimate of the number of files in the dirstate based on
412 # Make an estimate of the number of files in the dirstate based on
409 # its size. From a linear regression on a set of real-world repos,
413 # its size. From a linear regression on a set of real-world repos,
410 # all over 10,000 files, the size of a dirstate entry is 85
414 # all over 10,000 files, the size of a dirstate entry is 85
411 # bytes. The cost of resizing is significantly higher than the cost
415 # bytes. The cost of resizing is significantly higher than the cost
412 # of filling in a larger presized dict, so subtract 20% from the
416 # of filling in a larger presized dict, so subtract 20% from the
413 # size.
417 # size.
414 #
418 #
415 # This heuristic is imperfect in many ways, so in a future dirstate
419 # This heuristic is imperfect in many ways, so in a future dirstate
416 # format update it makes sense to just record the number of entries
420 # format update it makes sense to just record the number of entries
417 # on write.
421 # on write.
418 self._map = parsers.dict_new_presized(len(st) / 71)
422 self._map = parsers.dict_new_presized(len(st) / 71)
419
423
420 # Python's garbage collector triggers a GC each time a certain number
424 # Python's garbage collector triggers a GC each time a certain number
421 # of container objects (the number being defined by
425 # of container objects (the number being defined by
422 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
426 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
423 # for each file in the dirstate. The C version then immediately marks
427 # for each file in the dirstate. The C version then immediately marks
424 # them as not to be tracked by the collector. However, this has no
428 # them as not to be tracked by the collector. However, this has no
425 # effect on when GCs are triggered, only on what objects the GC looks
429 # effect on when GCs are triggered, only on what objects the GC looks
426 # into. This means that O(number of files) GCs are unavoidable.
430 # into. This means that O(number of files) GCs are unavoidable.
427 # Depending on when in the process's lifetime the dirstate is parsed,
431 # Depending on when in the process's lifetime the dirstate is parsed,
428 # this can get very expensive. As a workaround, disable GC while
432 # this can get very expensive. As a workaround, disable GC while
429 # parsing the dirstate.
433 # parsing the dirstate.
430 #
434 #
431 # (we cannot decorate the function directly since it is in a C module)
435 # (we cannot decorate the function directly since it is in a C module)
432 parse_dirstate = util.nogc(parsers.parse_dirstate)
436 parse_dirstate = util.nogc(parsers.parse_dirstate)
433 p = parse_dirstate(self._map, self._copymap, st)
437 p = parse_dirstate(self._map, self._copymap, st)
434 if not self._dirtypl:
438 if not self._dirtypl:
435 self._pl = p
439 self._pl = p
436
440
437 def invalidate(self):
441 def invalidate(self):
438 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
442 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
439 "_pl", "_dirs", "_ignore", "_nonnormalset"):
443 "_pl", "_dirs", "_ignore", "_nonnormalset"):
440 if a in self.__dict__:
444 if a in self.__dict__:
441 delattr(self, a)
445 delattr(self, a)
442 self._lastnormaltime = 0
446 self._lastnormaltime = 0
443 self._dirty = False
447 self._dirty = False
444 self._parentwriters = 0
448 self._parentwriters = 0
449 self._origpl = None
445
450
446 def copy(self, source, dest):
451 def copy(self, source, dest):
447 """Mark dest as a copy of source. Unmark dest if source is None."""
452 """Mark dest as a copy of source. Unmark dest if source is None."""
448 if source == dest:
453 if source == dest:
449 return
454 return
450 self._dirty = True
455 self._dirty = True
451 if source is not None:
456 if source is not None:
452 self._copymap[dest] = source
457 self._copymap[dest] = source
453 elif dest in self._copymap:
458 elif dest in self._copymap:
454 del self._copymap[dest]
459 del self._copymap[dest]
455
460
456 def copied(self, file):
461 def copied(self, file):
457 return self._copymap.get(file, None)
462 return self._copymap.get(file, None)
458
463
459 def copies(self):
464 def copies(self):
460 return self._copymap
465 return self._copymap
461
466
462 def _droppath(self, f):
467 def _droppath(self, f):
463 if self[f] not in "?r" and "_dirs" in self.__dict__:
468 if self[f] not in "?r" and "_dirs" in self.__dict__:
464 self._dirs.delpath(f)
469 self._dirs.delpath(f)
465
470
466 if "_filefoldmap" in self.__dict__:
471 if "_filefoldmap" in self.__dict__:
467 normed = util.normcase(f)
472 normed = util.normcase(f)
468 if normed in self._filefoldmap:
473 if normed in self._filefoldmap:
469 del self._filefoldmap[normed]
474 del self._filefoldmap[normed]
470
475
471 def _addpath(self, f, state, mode, size, mtime):
476 def _addpath(self, f, state, mode, size, mtime):
472 oldstate = self[f]
477 oldstate = self[f]
473 if state == 'a' or oldstate == 'r':
478 if state == 'a' or oldstate == 'r':
474 scmutil.checkfilename(f)
479 scmutil.checkfilename(f)
475 if f in self._dirs:
480 if f in self._dirs:
476 raise error.Abort(_('directory %r already in dirstate') % f)
481 raise error.Abort(_('directory %r already in dirstate') % f)
477 # shadows
482 # shadows
478 for d in util.finddirs(f):
483 for d in util.finddirs(f):
479 if d in self._dirs:
484 if d in self._dirs:
480 break
485 break
481 if d in self._map and self[d] != 'r':
486 if d in self._map and self[d] != 'r':
482 raise error.Abort(
487 raise error.Abort(
483 _('file %r in dirstate clashes with %r') % (d, f))
488 _('file %r in dirstate clashes with %r') % (d, f))
484 if oldstate in "?r" and "_dirs" in self.__dict__:
489 if oldstate in "?r" and "_dirs" in self.__dict__:
485 self._dirs.addpath(f)
490 self._dirs.addpath(f)
486 self._dirty = True
491 self._dirty = True
487 self._map[f] = dirstatetuple(state, mode, size, mtime)
492 self._map[f] = dirstatetuple(state, mode, size, mtime)
488 if state != 'n' or mtime == -1:
493 if state != 'n' or mtime == -1:
489 self._nonnormalset.add(f)
494 self._nonnormalset.add(f)
490
495
491 def normal(self, f):
496 def normal(self, f):
492 '''Mark a file normal and clean.'''
497 '''Mark a file normal and clean.'''
493 s = os.lstat(self._join(f))
498 s = os.lstat(self._join(f))
494 mtime = s.st_mtime
499 mtime = s.st_mtime
495 self._addpath(f, 'n', s.st_mode,
500 self._addpath(f, 'n', s.st_mode,
496 s.st_size & _rangemask, mtime & _rangemask)
501 s.st_size & _rangemask, mtime & _rangemask)
497 if f in self._copymap:
502 if f in self._copymap:
498 del self._copymap[f]
503 del self._copymap[f]
499 if f in self._nonnormalset:
504 if f in self._nonnormalset:
500 self._nonnormalset.remove(f)
505 self._nonnormalset.remove(f)
501 if mtime > self._lastnormaltime:
506 if mtime > self._lastnormaltime:
502 # Remember the most recent modification timeslot for status(),
507 # Remember the most recent modification timeslot for status(),
503 # to make sure we won't miss future size-preserving file content
508 # to make sure we won't miss future size-preserving file content
504 # modifications that happen within the same timeslot.
509 # modifications that happen within the same timeslot.
505 self._lastnormaltime = mtime
510 self._lastnormaltime = mtime
506
511
507 def normallookup(self, f):
512 def normallookup(self, f):
508 '''Mark a file normal, but possibly dirty.'''
513 '''Mark a file normal, but possibly dirty.'''
509 if self._pl[1] != nullid and f in self._map:
514 if self._pl[1] != nullid and f in self._map:
510 # if there is a merge going on and the file was either
515 # if there is a merge going on and the file was either
511 # in state 'm' (-1) or coming from other parent (-2) before
516 # in state 'm' (-1) or coming from other parent (-2) before
512 # being removed, restore that state.
517 # being removed, restore that state.
513 entry = self._map[f]
518 entry = self._map[f]
514 if entry[0] == 'r' and entry[2] in (-1, -2):
519 if entry[0] == 'r' and entry[2] in (-1, -2):
515 source = self._copymap.get(f)
520 source = self._copymap.get(f)
516 if entry[2] == -1:
521 if entry[2] == -1:
517 self.merge(f)
522 self.merge(f)
518 elif entry[2] == -2:
523 elif entry[2] == -2:
519 self.otherparent(f)
524 self.otherparent(f)
520 if source:
525 if source:
521 self.copy(source, f)
526 self.copy(source, f)
522 return
527 return
523 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
528 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
524 return
529 return
525 self._addpath(f, 'n', 0, -1, -1)
530 self._addpath(f, 'n', 0, -1, -1)
526 if f in self._copymap:
531 if f in self._copymap:
527 del self._copymap[f]
532 del self._copymap[f]
528 if f in self._nonnormalset:
533 if f in self._nonnormalset:
529 self._nonnormalset.remove(f)
534 self._nonnormalset.remove(f)
530
535
531 def otherparent(self, f):
536 def otherparent(self, f):
532 '''Mark as coming from the other parent, always dirty.'''
537 '''Mark as coming from the other parent, always dirty.'''
533 if self._pl[1] == nullid:
538 if self._pl[1] == nullid:
534 raise error.Abort(_("setting %r to other parent "
539 raise error.Abort(_("setting %r to other parent "
535 "only allowed in merges") % f)
540 "only allowed in merges") % f)
536 if f in self and self[f] == 'n':
541 if f in self and self[f] == 'n':
537 # merge-like
542 # merge-like
538 self._addpath(f, 'm', 0, -2, -1)
543 self._addpath(f, 'm', 0, -2, -1)
539 else:
544 else:
540 # add-like
545 # add-like
541 self._addpath(f, 'n', 0, -2, -1)
546 self._addpath(f, 'n', 0, -2, -1)
542
547
543 if f in self._copymap:
548 if f in self._copymap:
544 del self._copymap[f]
549 del self._copymap[f]
545
550
546 def add(self, f):
551 def add(self, f):
547 '''Mark a file added.'''
552 '''Mark a file added.'''
548 self._addpath(f, 'a', 0, -1, -1)
553 self._addpath(f, 'a', 0, -1, -1)
549 if f in self._copymap:
554 if f in self._copymap:
550 del self._copymap[f]
555 del self._copymap[f]
551
556
552 def remove(self, f):
557 def remove(self, f):
553 '''Mark a file removed.'''
558 '''Mark a file removed.'''
554 self._dirty = True
559 self._dirty = True
555 self._droppath(f)
560 self._droppath(f)
556 size = 0
561 size = 0
557 if self._pl[1] != nullid and f in self._map:
562 if self._pl[1] != nullid and f in self._map:
558 # backup the previous state
563 # backup the previous state
559 entry = self._map[f]
564 entry = self._map[f]
560 if entry[0] == 'm': # merge
565 if entry[0] == 'm': # merge
561 size = -1
566 size = -1
562 elif entry[0] == 'n' and entry[2] == -2: # other parent
567 elif entry[0] == 'n' and entry[2] == -2: # other parent
563 size = -2
568 size = -2
564 self._map[f] = dirstatetuple('r', 0, size, 0)
569 self._map[f] = dirstatetuple('r', 0, size, 0)
565 self._nonnormalset.add(f)
570 self._nonnormalset.add(f)
566 if size == 0 and f in self._copymap:
571 if size == 0 and f in self._copymap:
567 del self._copymap[f]
572 del self._copymap[f]
568
573
569 def merge(self, f):
574 def merge(self, f):
570 '''Mark a file merged.'''
575 '''Mark a file merged.'''
571 if self._pl[1] == nullid:
576 if self._pl[1] == nullid:
572 return self.normallookup(f)
577 return self.normallookup(f)
573 return self.otherparent(f)
578 return self.otherparent(f)
574
579
575 def drop(self, f):
580 def drop(self, f):
576 '''Drop a file from the dirstate'''
581 '''Drop a file from the dirstate'''
577 if f in self._map:
582 if f in self._map:
578 self._dirty = True
583 self._dirty = True
579 self._droppath(f)
584 self._droppath(f)
580 del self._map[f]
585 del self._map[f]
581 if f in self._nonnormalset:
586 if f in self._nonnormalset:
582 self._nonnormalset.remove(f)
587 self._nonnormalset.remove(f)
583 if f in self._copymap:
588 if f in self._copymap:
584 del self._copymap[f]
589 del self._copymap[f]
585
590
586 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
591 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
587 if exists is None:
592 if exists is None:
588 exists = os.path.lexists(os.path.join(self._root, path))
593 exists = os.path.lexists(os.path.join(self._root, path))
589 if not exists:
594 if not exists:
590 # Maybe a path component exists
595 # Maybe a path component exists
591 if not ignoremissing and '/' in path:
596 if not ignoremissing and '/' in path:
592 d, f = path.rsplit('/', 1)
597 d, f = path.rsplit('/', 1)
593 d = self._normalize(d, False, ignoremissing, None)
598 d = self._normalize(d, False, ignoremissing, None)
594 folded = d + "/" + f
599 folded = d + "/" + f
595 else:
600 else:
596 # No path components, preserve original case
601 # No path components, preserve original case
597 folded = path
602 folded = path
598 else:
603 else:
599 # recursively normalize leading directory components
604 # recursively normalize leading directory components
600 # against dirstate
605 # against dirstate
601 if '/' in normed:
606 if '/' in normed:
602 d, f = normed.rsplit('/', 1)
607 d, f = normed.rsplit('/', 1)
603 d = self._normalize(d, False, ignoremissing, True)
608 d = self._normalize(d, False, ignoremissing, True)
604 r = self._root + "/" + d
609 r = self._root + "/" + d
605 folded = d + "/" + util.fspath(f, r)
610 folded = d + "/" + util.fspath(f, r)
606 else:
611 else:
607 folded = util.fspath(normed, self._root)
612 folded = util.fspath(normed, self._root)
608 storemap[normed] = folded
613 storemap[normed] = folded
609
614
610 return folded
615 return folded
611
616
612 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
617 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
613 normed = util.normcase(path)
618 normed = util.normcase(path)
614 folded = self._filefoldmap.get(normed, None)
619 folded = self._filefoldmap.get(normed, None)
615 if folded is None:
620 if folded is None:
616 if isknown:
621 if isknown:
617 folded = path
622 folded = path
618 else:
623 else:
619 folded = self._discoverpath(path, normed, ignoremissing, exists,
624 folded = self._discoverpath(path, normed, ignoremissing, exists,
620 self._filefoldmap)
625 self._filefoldmap)
621 return folded
626 return folded
622
627
623 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
628 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
624 normed = util.normcase(path)
629 normed = util.normcase(path)
625 folded = self._filefoldmap.get(normed, None)
630 folded = self._filefoldmap.get(normed, None)
626 if folded is None:
631 if folded is None:
627 folded = self._dirfoldmap.get(normed, None)
632 folded = self._dirfoldmap.get(normed, None)
628 if folded is None:
633 if folded is None:
629 if isknown:
634 if isknown:
630 folded = path
635 folded = path
631 else:
636 else:
632 # store discovered result in dirfoldmap so that future
637 # store discovered result in dirfoldmap so that future
633 # normalizefile calls don't start matching directories
638 # normalizefile calls don't start matching directories
634 folded = self._discoverpath(path, normed, ignoremissing, exists,
639 folded = self._discoverpath(path, normed, ignoremissing, exists,
635 self._dirfoldmap)
640 self._dirfoldmap)
636 return folded
641 return folded
637
642
638 def normalize(self, path, isknown=False, ignoremissing=False):
643 def normalize(self, path, isknown=False, ignoremissing=False):
639 '''
644 '''
640 normalize the case of a pathname when on a casefolding filesystem
645 normalize the case of a pathname when on a casefolding filesystem
641
646
642 isknown specifies whether the filename came from walking the
647 isknown specifies whether the filename came from walking the
643 disk, to avoid extra filesystem access.
648 disk, to avoid extra filesystem access.
644
649
645 If ignoremissing is True, missing path are returned
650 If ignoremissing is True, missing path are returned
646 unchanged. Otherwise, we try harder to normalize possibly
651 unchanged. Otherwise, we try harder to normalize possibly
647 existing path components.
652 existing path components.
648
653
649 The normalized case is determined based on the following precedence:
654 The normalized case is determined based on the following precedence:
650
655
651 - version of name already stored in the dirstate
656 - version of name already stored in the dirstate
652 - version of name stored on disk
657 - version of name stored on disk
653 - version provided via command arguments
658 - version provided via command arguments
654 '''
659 '''
655
660
656 if self._checkcase:
661 if self._checkcase:
657 return self._normalize(path, isknown, ignoremissing)
662 return self._normalize(path, isknown, ignoremissing)
658 return path
663 return path
659
664
660 def clear(self):
665 def clear(self):
661 self._map = {}
666 self._map = {}
662 self._nonnormalset = set()
667 self._nonnormalset = set()
663 if "_dirs" in self.__dict__:
668 if "_dirs" in self.__dict__:
664 delattr(self, "_dirs")
669 delattr(self, "_dirs")
665 self._copymap = {}
670 self._copymap = {}
666 self._pl = [nullid, nullid]
671 self._pl = [nullid, nullid]
667 self._lastnormaltime = 0
672 self._lastnormaltime = 0
668 self._dirty = True
673 self._dirty = True
669
674
670 def rebuild(self, parent, allfiles, changedfiles=None):
675 def rebuild(self, parent, allfiles, changedfiles=None):
671 if changedfiles is None:
676 if changedfiles is None:
672 # Rebuild entire dirstate
677 # Rebuild entire dirstate
673 changedfiles = allfiles
678 changedfiles = allfiles
674 lastnormaltime = self._lastnormaltime
679 lastnormaltime = self._lastnormaltime
675 self.clear()
680 self.clear()
676 self._lastnormaltime = lastnormaltime
681 self._lastnormaltime = lastnormaltime
677
682
678 for f in changedfiles:
683 for f in changedfiles:
679 mode = 0o666
684 mode = 0o666
680 if f in allfiles and 'x' in allfiles.flags(f):
685 if f in allfiles and 'x' in allfiles.flags(f):
681 mode = 0o777
686 mode = 0o777
682
687
683 if f in allfiles:
688 if f in allfiles:
684 self._map[f] = dirstatetuple('n', mode, -1, 0)
689 self._map[f] = dirstatetuple('n', mode, -1, 0)
685 else:
690 else:
686 self._map.pop(f, None)
691 self._map.pop(f, None)
687 if f in self._nonnormalset:
692 if f in self._nonnormalset:
688 self._nonnormalset.remove(f)
693 self._nonnormalset.remove(f)
689
694
695 if self._origpl is None:
696 self._origpl = self._pl
690 self._pl = (parent, nullid)
697 self._pl = (parent, nullid)
691 self._dirty = True
698 self._dirty = True
692
699
693 def write(self, tr):
700 def write(self, tr):
694 if not self._dirty:
701 if not self._dirty:
695 return
702 return
696
703
697 filename = self._filename
704 filename = self._filename
698 if tr:
705 if tr:
699 # 'dirstate.write()' is not only for writing in-memory
706 # 'dirstate.write()' is not only for writing in-memory
700 # changes out, but also for dropping ambiguous timestamp.
707 # changes out, but also for dropping ambiguous timestamp.
701 # delayed writing re-raise "ambiguous timestamp issue".
708 # delayed writing re-raise "ambiguous timestamp issue".
702 # See also the wiki page below for detail:
709 # See also the wiki page below for detail:
703 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
710 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
704
711
705 # emulate dropping timestamp in 'parsers.pack_dirstate'
712 # emulate dropping timestamp in 'parsers.pack_dirstate'
706 now = _getfsnow(self._opener)
713 now = _getfsnow(self._opener)
707 dmap = self._map
714 dmap = self._map
708 for f, e in dmap.iteritems():
715 for f, e in dmap.iteritems():
709 if e[0] == 'n' and e[3] == now:
716 if e[0] == 'n' and e[3] == now:
710 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
717 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
711 self._nonnormalset.add(f)
718 self._nonnormalset.add(f)
712
719
713 # emulate that all 'dirstate.normal' results are written out
720 # emulate that all 'dirstate.normal' results are written out
714 self._lastnormaltime = 0
721 self._lastnormaltime = 0
715
722
716 # delay writing in-memory changes out
723 # delay writing in-memory changes out
717 tr.addfilegenerator('dirstate', (self._filename,),
724 tr.addfilegenerator('dirstate', (self._filename,),
718 self._writedirstate, location='plain')
725 self._writedirstate, location='plain')
719 return
726 return
720
727
721 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
728 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
722 self._writedirstate(st)
729 self._writedirstate(st)
723
730
731 def addparentchangecallback(self, category, callback):
732 """add a callback to be called when the wd parents are changed
733
734 Callback will be called with the following arguments:
735 dirstate, (oldp1, oldp2), (newp1, newp2)
736
737 Category is a unique identifier to allow overwriting an old callback
738 with a newer callback.
739 """
740 self._plchangecallbacks[category] = callback
741
724 def _writedirstate(self, st):
742 def _writedirstate(self, st):
743 # notify callbacks about parents change
744 if self._origpl is not None and self._origpl != self._pl:
745 for c, callback in sorted(self._plchangecallbacks.iteritems()):
746 callback(self, self._origpl, self._pl)
747 self._origpl = None
725 # use the modification time of the newly created temporary file as the
748 # use the modification time of the newly created temporary file as the
726 # filesystem's notion of 'now'
749 # filesystem's notion of 'now'
727 now = util.fstat(st).st_mtime & _rangemask
750 now = util.fstat(st).st_mtime & _rangemask
728
751
729 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
752 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
730 # timestamp of each entries in dirstate, because of 'now > mtime'
753 # timestamp of each entries in dirstate, because of 'now > mtime'
731 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
754 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
732 if delaywrite > 0:
755 if delaywrite > 0:
733 # do we have any files to delay for?
756 # do we have any files to delay for?
734 for f, e in self._map.iteritems():
757 for f, e in self._map.iteritems():
735 if e[0] == 'n' and e[3] == now:
758 if e[0] == 'n' and e[3] == now:
736 import time # to avoid useless import
759 import time # to avoid useless import
737 # rather than sleep n seconds, sleep until the next
760 # rather than sleep n seconds, sleep until the next
738 # multiple of n seconds
761 # multiple of n seconds
739 clock = time.time()
762 clock = time.time()
740 start = int(clock) - (int(clock) % delaywrite)
763 start = int(clock) - (int(clock) % delaywrite)
741 end = start + delaywrite
764 end = start + delaywrite
742 time.sleep(end - clock)
765 time.sleep(end - clock)
743 break
766 break
744
767
745 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
768 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
746 self._nonnormalset = nonnormalentries(self._map)
769 self._nonnormalset = nonnormalentries(self._map)
747 st.close()
770 st.close()
748 self._lastnormaltime = 0
771 self._lastnormaltime = 0
749 self._dirty = self._dirtypl = False
772 self._dirty = self._dirtypl = False
750
773
751 def _dirignore(self, f):
774 def _dirignore(self, f):
752 if f == '.':
775 if f == '.':
753 return False
776 return False
754 if self._ignore(f):
777 if self._ignore(f):
755 return True
778 return True
756 for p in util.finddirs(f):
779 for p in util.finddirs(f):
757 if self._ignore(p):
780 if self._ignore(p):
758 return True
781 return True
759 return False
782 return False
760
783
761 def _ignorefiles(self):
784 def _ignorefiles(self):
762 files = []
785 files = []
763 if os.path.exists(self._join('.hgignore')):
786 if os.path.exists(self._join('.hgignore')):
764 files.append(self._join('.hgignore'))
787 files.append(self._join('.hgignore'))
765 for name, path in self._ui.configitems("ui"):
788 for name, path in self._ui.configitems("ui"):
766 if name == 'ignore' or name.startswith('ignore.'):
789 if name == 'ignore' or name.startswith('ignore.'):
767 # we need to use os.path.join here rather than self._join
790 # we need to use os.path.join here rather than self._join
768 # because path is arbitrary and user-specified
791 # because path is arbitrary and user-specified
769 files.append(os.path.join(self._rootdir, util.expandpath(path)))
792 files.append(os.path.join(self._rootdir, util.expandpath(path)))
770 return files
793 return files
771
794
772 def _ignorefileandline(self, f):
795 def _ignorefileandline(self, f):
773 files = collections.deque(self._ignorefiles())
796 files = collections.deque(self._ignorefiles())
774 visited = set()
797 visited = set()
775 while files:
798 while files:
776 i = files.popleft()
799 i = files.popleft()
777 patterns = matchmod.readpatternfile(i, self._ui.warn,
800 patterns = matchmod.readpatternfile(i, self._ui.warn,
778 sourceinfo=True)
801 sourceinfo=True)
779 for pattern, lineno, line in patterns:
802 for pattern, lineno, line in patterns:
780 kind, p = matchmod._patsplit(pattern, 'glob')
803 kind, p = matchmod._patsplit(pattern, 'glob')
781 if kind == "subinclude":
804 if kind == "subinclude":
782 if p not in visited:
805 if p not in visited:
783 files.append(p)
806 files.append(p)
784 continue
807 continue
785 m = matchmod.match(self._root, '', [], [pattern],
808 m = matchmod.match(self._root, '', [], [pattern],
786 warn=self._ui.warn)
809 warn=self._ui.warn)
787 if m(f):
810 if m(f):
788 return (i, lineno, line)
811 return (i, lineno, line)
789 visited.add(i)
812 visited.add(i)
790 return (None, -1, "")
813 return (None, -1, "")
791
814
792 def _walkexplicit(self, match, subrepos):
815 def _walkexplicit(self, match, subrepos):
793 '''Get stat data about the files explicitly specified by match.
816 '''Get stat data about the files explicitly specified by match.
794
817
795 Return a triple (results, dirsfound, dirsnotfound).
818 Return a triple (results, dirsfound, dirsnotfound).
796 - results is a mapping from filename to stat result. It also contains
819 - results is a mapping from filename to stat result. It also contains
797 listings mapping subrepos and .hg to None.
820 listings mapping subrepos and .hg to None.
798 - dirsfound is a list of files found to be directories.
821 - dirsfound is a list of files found to be directories.
799 - dirsnotfound is a list of files that the dirstate thinks are
822 - dirsnotfound is a list of files that the dirstate thinks are
800 directories and that were not found.'''
823 directories and that were not found.'''
801
824
802 def badtype(mode):
825 def badtype(mode):
803 kind = _('unknown')
826 kind = _('unknown')
804 if stat.S_ISCHR(mode):
827 if stat.S_ISCHR(mode):
805 kind = _('character device')
828 kind = _('character device')
806 elif stat.S_ISBLK(mode):
829 elif stat.S_ISBLK(mode):
807 kind = _('block device')
830 kind = _('block device')
808 elif stat.S_ISFIFO(mode):
831 elif stat.S_ISFIFO(mode):
809 kind = _('fifo')
832 kind = _('fifo')
810 elif stat.S_ISSOCK(mode):
833 elif stat.S_ISSOCK(mode):
811 kind = _('socket')
834 kind = _('socket')
812 elif stat.S_ISDIR(mode):
835 elif stat.S_ISDIR(mode):
813 kind = _('directory')
836 kind = _('directory')
814 return _('unsupported file type (type is %s)') % kind
837 return _('unsupported file type (type is %s)') % kind
815
838
816 matchedir = match.explicitdir
839 matchedir = match.explicitdir
817 badfn = match.bad
840 badfn = match.bad
818 dmap = self._map
841 dmap = self._map
819 lstat = os.lstat
842 lstat = os.lstat
820 getkind = stat.S_IFMT
843 getkind = stat.S_IFMT
821 dirkind = stat.S_IFDIR
844 dirkind = stat.S_IFDIR
822 regkind = stat.S_IFREG
845 regkind = stat.S_IFREG
823 lnkkind = stat.S_IFLNK
846 lnkkind = stat.S_IFLNK
824 join = self._join
847 join = self._join
825 dirsfound = []
848 dirsfound = []
826 foundadd = dirsfound.append
849 foundadd = dirsfound.append
827 dirsnotfound = []
850 dirsnotfound = []
828 notfoundadd = dirsnotfound.append
851 notfoundadd = dirsnotfound.append
829
852
830 if not match.isexact() and self._checkcase:
853 if not match.isexact() and self._checkcase:
831 normalize = self._normalize
854 normalize = self._normalize
832 else:
855 else:
833 normalize = None
856 normalize = None
834
857
835 files = sorted(match.files())
858 files = sorted(match.files())
836 subrepos.sort()
859 subrepos.sort()
837 i, j = 0, 0
860 i, j = 0, 0
838 while i < len(files) and j < len(subrepos):
861 while i < len(files) and j < len(subrepos):
839 subpath = subrepos[j] + "/"
862 subpath = subrepos[j] + "/"
840 if files[i] < subpath:
863 if files[i] < subpath:
841 i += 1
864 i += 1
842 continue
865 continue
843 while i < len(files) and files[i].startswith(subpath):
866 while i < len(files) and files[i].startswith(subpath):
844 del files[i]
867 del files[i]
845 j += 1
868 j += 1
846
869
847 if not files or '.' in files:
870 if not files or '.' in files:
848 files = ['.']
871 files = ['.']
849 results = dict.fromkeys(subrepos)
872 results = dict.fromkeys(subrepos)
850 results['.hg'] = None
873 results['.hg'] = None
851
874
852 alldirs = None
875 alldirs = None
853 for ff in files:
876 for ff in files:
854 # constructing the foldmap is expensive, so don't do it for the
877 # constructing the foldmap is expensive, so don't do it for the
855 # common case where files is ['.']
878 # common case where files is ['.']
856 if normalize and ff != '.':
879 if normalize and ff != '.':
857 nf = normalize(ff, False, True)
880 nf = normalize(ff, False, True)
858 else:
881 else:
859 nf = ff
882 nf = ff
860 if nf in results:
883 if nf in results:
861 continue
884 continue
862
885
863 try:
886 try:
864 st = lstat(join(nf))
887 st = lstat(join(nf))
865 kind = getkind(st.st_mode)
888 kind = getkind(st.st_mode)
866 if kind == dirkind:
889 if kind == dirkind:
867 if nf in dmap:
890 if nf in dmap:
868 # file replaced by dir on disk but still in dirstate
891 # file replaced by dir on disk but still in dirstate
869 results[nf] = None
892 results[nf] = None
870 if matchedir:
893 if matchedir:
871 matchedir(nf)
894 matchedir(nf)
872 foundadd((nf, ff))
895 foundadd((nf, ff))
873 elif kind == regkind or kind == lnkkind:
896 elif kind == regkind or kind == lnkkind:
874 results[nf] = st
897 results[nf] = st
875 else:
898 else:
876 badfn(ff, badtype(kind))
899 badfn(ff, badtype(kind))
877 if nf in dmap:
900 if nf in dmap:
878 results[nf] = None
901 results[nf] = None
879 except OSError as inst: # nf not found on disk - it is dirstate only
902 except OSError as inst: # nf not found on disk - it is dirstate only
880 if nf in dmap: # does it exactly match a missing file?
903 if nf in dmap: # does it exactly match a missing file?
881 results[nf] = None
904 results[nf] = None
882 else: # does it match a missing directory?
905 else: # does it match a missing directory?
883 if alldirs is None:
906 if alldirs is None:
884 alldirs = util.dirs(dmap)
907 alldirs = util.dirs(dmap)
885 if nf in alldirs:
908 if nf in alldirs:
886 if matchedir:
909 if matchedir:
887 matchedir(nf)
910 matchedir(nf)
888 notfoundadd(nf)
911 notfoundadd(nf)
889 else:
912 else:
890 badfn(ff, inst.strerror)
913 badfn(ff, inst.strerror)
891
914
892 # Case insensitive filesystems cannot rely on lstat() failing to detect
915 # Case insensitive filesystems cannot rely on lstat() failing to detect
893 # a case-only rename. Prune the stat object for any file that does not
916 # a case-only rename. Prune the stat object for any file that does not
894 # match the case in the filesystem, if there are multiple files that
917 # match the case in the filesystem, if there are multiple files that
895 # normalize to the same path.
918 # normalize to the same path.
896 if match.isexact() and self._checkcase:
919 if match.isexact() and self._checkcase:
897 normed = {}
920 normed = {}
898
921
899 for f, st in results.iteritems():
922 for f, st in results.iteritems():
900 if st is None:
923 if st is None:
901 continue
924 continue
902
925
903 nc = util.normcase(f)
926 nc = util.normcase(f)
904 paths = normed.get(nc)
927 paths = normed.get(nc)
905
928
906 if paths is None:
929 if paths is None:
907 paths = set()
930 paths = set()
908 normed[nc] = paths
931 normed[nc] = paths
909
932
910 paths.add(f)
933 paths.add(f)
911
934
912 for norm, paths in normed.iteritems():
935 for norm, paths in normed.iteritems():
913 if len(paths) > 1:
936 if len(paths) > 1:
914 for path in paths:
937 for path in paths:
915 folded = self._discoverpath(path, norm, True, None,
938 folded = self._discoverpath(path, norm, True, None,
916 self._dirfoldmap)
939 self._dirfoldmap)
917 if path != folded:
940 if path != folded:
918 results[path] = None
941 results[path] = None
919
942
920 return results, dirsfound, dirsnotfound
943 return results, dirsfound, dirsnotfound
921
944
    def walk(self, match, subrepos, unknown, ignored, full=True):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        '''
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = osutil.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                nd = work.pop()
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd), inst.strerror)
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that traverse doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn, b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = dmap.keys()
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was
                # stat'ed and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory,
                        # which we report as missing in this case.
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                nf = iter(visit).next
                for st in util.statfiles([join(i) for i in visit]):
                    results[nf()] = st
        return results

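A hedged sketch of how a caller might drive walk(); 'repo' is assumed to be an
already-opened localrepo object and is not defined in this file:

    # Illustrative sketch (not part of dirstate.py): list tracked and unknown
    # files in the working directory with an always-matching matcher.
    from mercurial import match as matchmod

    m = matchmod.always(repo.root, '')
    # unknown=True, ignored=False: report unknown files, skip ignored ones
    for fn, st in repo.dirstate.walk(m, subrepos=[], unknown=True,
                                     ignored=False).iteritems():
        # st is a stat-like object for entries seen on disk, or None for
        # entries known only to the dirstate (e.g. files removed on disk)
        print fn, st is not None
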
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        ladd = lookup.append # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))

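A hedged usage sketch for status(); again 'repo' is an assumption, not
something defined in this file:

    # Illustrative sketch (not part of dirstate.py): compute working-copy
    # status against the dirstate and inspect the result fields.
    from mercurial import match as matchmod

    m = matchmod.always(repo.root, '')
    unsure, st = repo.dirstate.status(m, subrepos=[], ignored=False,
                                      clean=False, unknown=True)
    # 'unsure' holds files whose size matched but whose mtime differed; the
    # caller must compare contents to decide whether they really changed.
    print 'modified:', st.modified
    print 'added:', st.added
    print 'removed:', st.removed
    print 'deleted:', st.deleted
    print 'unknown:', st.unknown
    print 'unsure:', unsure
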
    def matches(self, match):
        '''
        return files in the dirstate (in whatever state) filtered by match
        '''
        dmap = self._map
        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files
            # is much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just
            # return that
            return list(files)
        return [f for f in dmap if match(f)]

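A short sketch of matches() and its fast paths (same hypothetical 'repo' as
above):

    # Illustrative sketch (not part of dirstate.py): filter dirstate entries.
    from mercurial import match as matchmod

    # exact matcher: only the named files are looked up in the dirstate,
    # the cheap path since the file list is usually much smaller than dmap
    m = matchmod.exact(repo.root, '', ['setup.py', 'README'])
    tracked_subset = repo.dirstate.matches(m)

    # always matcher: every file in the dirstate is returned
    alltracked = repo.dirstate.matches(matchmod.always(repo.root, ''))
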
    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, suffix='', prefix=''):
        '''Save current dirstate into backup file with suffix'''
        assert len(suffix) > 0 or len(prefix) > 0
        filename = self._actualfilename(tr)

        # use '_writedirstate' instead of 'write' to make sure changes are
        # written out, because the latter skips writing while a transaction
        # is running. The output file is used to create the dirstate backup
        # at this point.
        self._writedirstate(self._opener(filename, "w", atomictemp=True,
                                         checkambig=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that the pending file written above is unlinked on
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        self._opener.write(prefix + self._filename + suffix,
                           self._opener.tryread(filename))

    def restorebackup(self, tr, suffix='', prefix=''):
        '''Restore dirstate from backup file with suffix'''
        assert len(suffix) > 0 or len(prefix) > 0
        # this "invalidate()" prevents "wlock.release()" from writing
        # dirstate changes out after restoring from the backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        # using self._filename to avoid having "pending" in the backup filename
        self._opener.rename(prefix + self._filename + suffix, filename,
                            checkambig=True)

    def clearbackup(self, tr, suffix='', prefix=''):
        '''Clear backup file with suffix'''
        assert len(suffix) > 0 or len(prefix) > 0
        # using self._filename to avoid having "pending" in the backup filename
        self._opener.unlink(prefix + self._filename + suffix)
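
The three backup helpers above are normally used together; a hedged sketch of
that pattern outside a transaction ('repo' and 'riskyupdate' are hypothetical):

    # Illustrative sketch (not part of dirstate.py): guard a working-directory
    # mutation with a dirstate backup. With tr=None the live dirstate file is
    # copied; on success the copy is dropped, on failure it is restored.
    repo.dirstate.savebackup(None, suffix='.example-backup')
    try:
        riskyupdate(repo)          # hypothetical working-directory mutation
        repo.dirstate.clearbackup(None, suffix='.example-backup')
    except Exception:
        repo.dirstate.restorebackup(None, suffix='.example-backup')
        raise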