dirstate: add code to update the non-normal set...
Laurent Charignon
r27590:f2d0ada0 default
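Reviewer note: this revision makes the dirstate mutators maintain the cached `_nonnormalset` incrementally. `_addpath` (new lines 492-493), `normal`/`normallookup` (503-504, 532-533), `remove` (569), `drop` (585-586), `clear` (664), `rebuild` (689-690) and the write paths (724, 759) now adjust the set as entries change, and `invalidate` (443) discards it together with the map. The set holds every file whose entry state is not 'n' or whose mtime is -1. A rough standalone sketch of the bookkeeping pattern being added; the class and names below are illustrative only, not Mercurial APIs:

    # each entry is (state, mode, size, mtime), as in dirstate._map
    class sketch(object):
        def __init__(self):
            self.map = {}
            self.nonnormal = set()

        def addpath(self, f, state, mode, size, mtime):
            # mirrors dirstate._addpath: record the entry, then classify it
            self.map[f] = (state, mode, size, mtime)
            if state != 'n' or mtime == -1:
                self.nonnormal.add(f)

        def drop(self, f):
            # mirrors dirstate.drop: forget the entry and its set membership
            self.map.pop(f, None)
            self.nonnormal.discard(f)

    s = sketch()
    s.addpath('added', 'a', 0, -1, -1)
    s.addpath('clean', 'n', 0o644, 5, 1450000000)
    assert s.nonnormal == {'added'}
    s.drop('added')
    assert s.nonnormal == set()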
@@ -1,1202 +1,1216 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import stat
12 import stat
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import nullid
15 from .node import nullid
16 from . import (
16 from . import (
17 encoding,
17 encoding,
18 error,
18 error,
19 match as matchmod,
19 match as matchmod,
20 osutil,
20 osutil,
21 parsers,
21 parsers,
22 pathutil,
22 pathutil,
23 scmutil,
23 scmutil,
24 util,
24 util,
25 )
25 )
26
26
27 propertycache = util.propertycache
27 propertycache = util.propertycache
28 filecache = scmutil.filecache
28 filecache = scmutil.filecache
29 _rangemask = 0x7fffffff
29 _rangemask = 0x7fffffff
30
30
31 dirstatetuple = parsers.dirstatetuple
31 dirstatetuple = parsers.dirstatetuple
32
32
33 class repocache(filecache):
33 class repocache(filecache):
34 """filecache for files in .hg/"""
34 """filecache for files in .hg/"""
35 def join(self, obj, fname):
35 def join(self, obj, fname):
36 return obj._opener.join(fname)
36 return obj._opener.join(fname)
37
37
38 class rootcache(filecache):
38 class rootcache(filecache):
39 """filecache for files in the repository root"""
39 """filecache for files in the repository root"""
40 def join(self, obj, fname):
40 def join(self, obj, fname):
41 return obj._join(fname)
41 return obj._join(fname)
42
42
43 def _getfsnow(vfs):
43 def _getfsnow(vfs):
44 '''Get "now" timestamp on filesystem'''
44 '''Get "now" timestamp on filesystem'''
45 tmpfd, tmpname = vfs.mkstemp()
45 tmpfd, tmpname = vfs.mkstemp()
46 try:
46 try:
47 return os.fstat(tmpfd).st_mtime
47 return os.fstat(tmpfd).st_mtime
48 finally:
48 finally:
49 os.close(tmpfd)
49 os.close(tmpfd)
50 vfs.unlink(tmpname)
50 vfs.unlink(tmpname)
51
51
52 def nonnormalentries(dmap):
52 def nonnormalentries(dmap):
53 '''Compute the nonnormal dirstate entries from the dmap'''
53 '''Compute the nonnormal dirstate entries from the dmap'''
54 return set(fname for fname, e in dmap.iteritems()
54 return set(fname for fname, e in dmap.iteritems()
55 if e[0] != 'n' or e[3] == -1)
55 if e[0] != 'n' or e[3] == -1)
56
56
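For reference, here is a hypothetical dmap and the set that nonnormalentries() above would derive from it. Plain tuples stand in for dirstatetuple, and .items() replaces .iteritems() so the sketch runs on either Python line; the file names are made up:

    def nonnormalentries(dmap):
        # same predicate as above: anything not clean, or with an unknown mtime
        return set(fname for fname, e in dmap.items()
                   if e[0] != 'n' or e[3] == -1)

    dmap = {
        'clean':   ('n', 0o644, 10, 1450000000),  # normal, trusted mtime
        'lookup':  ('n', 0o644, 10, -1),          # normal but must be re-checked
        'removed': ('r', 0, 0, 0),                # marked for removal
        'added':   ('a', 0, -1, -1),              # marked for addition
        'merged':  ('m', 0, -1, -1),              # needs merging
    }
    assert nonnormalentries(dmap) == {'lookup', 'removed', 'added', 'merged'}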
57 def _trypending(root, vfs, filename):
57 def _trypending(root, vfs, filename):
58 '''Open file to be read according to HG_PENDING environment variable
58 '''Open file to be read according to HG_PENDING environment variable
59
59
60 This opens '.pending' of specified 'filename' only when HG_PENDING
60 This opens '.pending' of specified 'filename' only when HG_PENDING
61 is equal to 'root'.
61 is equal to 'root'.
62
62
63 This returns '(fp, is_pending_opened)' tuple.
63 This returns '(fp, is_pending_opened)' tuple.
64 '''
64 '''
65 if root == os.environ.get('HG_PENDING'):
65 if root == os.environ.get('HG_PENDING'):
66 try:
66 try:
67 return (vfs('%s.pending' % filename), True)
67 return (vfs('%s.pending' % filename), True)
68 except IOError as inst:
68 except IOError as inst:
69 if inst.errno != errno.ENOENT:
69 if inst.errno != errno.ENOENT:
70 raise
70 raise
71 return (vfs(filename), False)
71 return (vfs(filename), False)
72
72
73 class dirstate(object):
73 class dirstate(object):
74
74
75 def __init__(self, opener, ui, root, validate):
75 def __init__(self, opener, ui, root, validate):
76 '''Create a new dirstate object.
76 '''Create a new dirstate object.
77
77
78 opener is an open()-like callable that can be used to open the
78 opener is an open()-like callable that can be used to open the
79 dirstate file; root is the root of the directory tracked by
79 dirstate file; root is the root of the directory tracked by
80 the dirstate.
80 the dirstate.
81 '''
81 '''
82 self._opener = opener
82 self._opener = opener
83 self._validate = validate
83 self._validate = validate
84 self._root = root
84 self._root = root
85 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
85 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
86 # UNC path pointing to root share (issue4557)
86 # UNC path pointing to root share (issue4557)
87 self._rootdir = pathutil.normasprefix(root)
87 self._rootdir = pathutil.normasprefix(root)
88 # internal config: ui.forcecwd
88 # internal config: ui.forcecwd
89 forcecwd = ui.config('ui', 'forcecwd')
89 forcecwd = ui.config('ui', 'forcecwd')
90 if forcecwd:
90 if forcecwd:
91 self._cwd = forcecwd
91 self._cwd = forcecwd
92 self._dirty = False
92 self._dirty = False
93 self._dirtypl = False
93 self._dirtypl = False
94 self._lastnormaltime = 0
94 self._lastnormaltime = 0
95 self._ui = ui
95 self._ui = ui
96 self._filecache = {}
96 self._filecache = {}
97 self._parentwriters = 0
97 self._parentwriters = 0
98 self._filename = 'dirstate'
98 self._filename = 'dirstate'
99 self._pendingfilename = '%s.pending' % self._filename
99 self._pendingfilename = '%s.pending' % self._filename
100
100
101 # for consistent view between _pl() and _read() invocations
101 # for consistent view between _pl() and _read() invocations
102 self._pendingmode = None
102 self._pendingmode = None
103
103
104 def beginparentchange(self):
104 def beginparentchange(self):
105 '''Marks the beginning of a set of changes that involve changing
105 '''Marks the beginning of a set of changes that involve changing
106 the dirstate parents. If there is an exception during this time,
106 the dirstate parents. If there is an exception during this time,
107 the dirstate will not be written when the wlock is released. This
107 the dirstate will not be written when the wlock is released. This
108 prevents writing an incoherent dirstate where the parent doesn't
108 prevents writing an incoherent dirstate where the parent doesn't
109 match the contents.
109 match the contents.
110 '''
110 '''
111 self._parentwriters += 1
111 self._parentwriters += 1
112
112
113 def endparentchange(self):
113 def endparentchange(self):
114 '''Marks the end of a set of changes that involve changing the
114 '''Marks the end of a set of changes that involve changing the
115 dirstate parents. Once all parent changes have been marked done,
115 dirstate parents. Once all parent changes have been marked done,
116 the wlock will be free to write the dirstate on release.
116 the wlock will be free to write the dirstate on release.
117 '''
117 '''
118 if self._parentwriters > 0:
118 if self._parentwriters > 0:
119 self._parentwriters -= 1
119 self._parentwriters -= 1
120
120
121 def pendingparentchange(self):
121 def pendingparentchange(self):
122 '''Returns true if the dirstate is in the middle of a set of changes
122 '''Returns true if the dirstate is in the middle of a set of changes
123 that modify the dirstate parent.
123 that modify the dirstate parent.
124 '''
124 '''
125 return self._parentwriters > 0
125 return self._parentwriters > 0
126
126
127 @propertycache
127 @propertycache
128 def _map(self):
128 def _map(self):
129 '''Return the dirstate contents as a map from filename to
129 '''Return the dirstate contents as a map from filename to
130 (state, mode, size, time).'''
130 (state, mode, size, time).'''
131 self._read()
131 self._read()
132 return self._map
132 return self._map
133
133
134 @propertycache
134 @propertycache
135 def _copymap(self):
135 def _copymap(self):
136 self._read()
136 self._read()
137 return self._copymap
137 return self._copymap
138
138
139 @propertycache
139 @propertycache
140 def _nonnormalset(self):
140 def _nonnormalset(self):
141 return nonnormalentries(self._map)
141 return nonnormalentries(self._map)
142
142
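_nonnormalset is built from the map the first time it is read and then cached on the instance; the rest of this patch keeps that cached value current as individual entries change. A simplified sketch of how a propertycache-style descriptor behaves (an approximation, not the real util.propertycache):

    class cachedprop(object):
        def __init__(self, func):
            self.func = func
            self.name = func.__name__

        def __get__(self, obj, objtype=None):
            value = self.func(obj)
            obj.__dict__[self.name] = value   # later reads hit the instance dict
            return value

    class demo(object):
        calls = 0
        @cachedprop
        def expensive(self):
            demo.calls += 1
            return 42

    d = demo()
    assert (d.expensive, d.expensive, demo.calls) == (42, 42, 1)
    del d.__dict__['expensive']    # what delattr() in invalidate() amounts to
    assert (d.expensive, demo.calls) == (42, 2)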
143 @propertycache
143 @propertycache
144 def _filefoldmap(self):
144 def _filefoldmap(self):
145 try:
145 try:
146 makefilefoldmap = parsers.make_file_foldmap
146 makefilefoldmap = parsers.make_file_foldmap
147 except AttributeError:
147 except AttributeError:
148 pass
148 pass
149 else:
149 else:
150 return makefilefoldmap(self._map, util.normcasespec,
150 return makefilefoldmap(self._map, util.normcasespec,
151 util.normcasefallback)
151 util.normcasefallback)
152
152
153 f = {}
153 f = {}
154 normcase = util.normcase
154 normcase = util.normcase
155 for name, s in self._map.iteritems():
155 for name, s in self._map.iteritems():
156 if s[0] != 'r':
156 if s[0] != 'r':
157 f[normcase(name)] = name
157 f[normcase(name)] = name
158 f['.'] = '.' # prevents useless util.fspath() invocation
158 f['.'] = '.' # prevents useless util.fspath() invocation
159 return f
159 return f
160
160
161 @propertycache
161 @propertycache
162 def _dirfoldmap(self):
162 def _dirfoldmap(self):
163 f = {}
163 f = {}
164 normcase = util.normcase
164 normcase = util.normcase
165 for name in self._dirs:
165 for name in self._dirs:
166 f[normcase(name)] = name
166 f[normcase(name)] = name
167 return f
167 return f
168
168
169 @repocache('branch')
169 @repocache('branch')
170 def _branch(self):
170 def _branch(self):
171 try:
171 try:
172 return self._opener.read("branch").strip() or "default"
172 return self._opener.read("branch").strip() or "default"
173 except IOError as inst:
173 except IOError as inst:
174 if inst.errno != errno.ENOENT:
174 if inst.errno != errno.ENOENT:
175 raise
175 raise
176 return "default"
176 return "default"
177
177
178 @propertycache
178 @propertycache
179 def _pl(self):
179 def _pl(self):
180 try:
180 try:
181 fp = self._opendirstatefile()
181 fp = self._opendirstatefile()
182 st = fp.read(40)
182 st = fp.read(40)
183 fp.close()
183 fp.close()
184 l = len(st)
184 l = len(st)
185 if l == 40:
185 if l == 40:
186 return st[:20], st[20:40]
186 return st[:20], st[20:40]
187 elif l > 0 and l < 40:
187 elif l > 0 and l < 40:
188 raise error.Abort(_('working directory state appears damaged!'))
188 raise error.Abort(_('working directory state appears damaged!'))
189 except IOError as err:
189 except IOError as err:
190 if err.errno != errno.ENOENT:
190 if err.errno != errno.ENOENT:
191 raise
191 raise
192 return [nullid, nullid]
192 return [nullid, nullid]
193
193
194 @propertycache
194 @propertycache
195 def _dirs(self):
195 def _dirs(self):
196 return util.dirs(self._map, 'r')
196 return util.dirs(self._map, 'r')
197
197
198 def dirs(self):
198 def dirs(self):
199 return self._dirs
199 return self._dirs
200
200
201 @rootcache('.hgignore')
201 @rootcache('.hgignore')
202 def _ignore(self):
202 def _ignore(self):
203 files = []
203 files = []
204 if os.path.exists(self._join('.hgignore')):
204 if os.path.exists(self._join('.hgignore')):
205 files.append(self._join('.hgignore'))
205 files.append(self._join('.hgignore'))
206 for name, path in self._ui.configitems("ui"):
206 for name, path in self._ui.configitems("ui"):
207 if name == 'ignore' or name.startswith('ignore.'):
207 if name == 'ignore' or name.startswith('ignore.'):
208 # we need to use os.path.join here rather than self._join
208 # we need to use os.path.join here rather than self._join
209 # because path is arbitrary and user-specified
209 # because path is arbitrary and user-specified
210 files.append(os.path.join(self._rootdir, util.expandpath(path)))
210 files.append(os.path.join(self._rootdir, util.expandpath(path)))
211
211
212 if not files:
212 if not files:
213 return util.never
213 return util.never
214
214
215 pats = ['include:%s' % f for f in files]
215 pats = ['include:%s' % f for f in files]
216 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
216 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
217
217
218 @propertycache
218 @propertycache
219 def _slash(self):
219 def _slash(self):
220 return self._ui.configbool('ui', 'slash') and os.sep != '/'
220 return self._ui.configbool('ui', 'slash') and os.sep != '/'
221
221
222 @propertycache
222 @propertycache
223 def _checklink(self):
223 def _checklink(self):
224 return util.checklink(self._root)
224 return util.checklink(self._root)
225
225
226 @propertycache
226 @propertycache
227 def _checkexec(self):
227 def _checkexec(self):
228 return util.checkexec(self._root)
228 return util.checkexec(self._root)
229
229
230 @propertycache
230 @propertycache
231 def _checkcase(self):
231 def _checkcase(self):
232 return not util.checkcase(self._join('.hg'))
232 return not util.checkcase(self._join('.hg'))
233
233
234 def _join(self, f):
234 def _join(self, f):
235 # much faster than os.path.join()
235 # much faster than os.path.join()
236 # it's safe because f is always a relative path
236 # it's safe because f is always a relative path
237 return self._rootdir + f
237 return self._rootdir + f
238
238
239 def flagfunc(self, buildfallback):
239 def flagfunc(self, buildfallback):
240 if self._checklink and self._checkexec:
240 if self._checklink and self._checkexec:
241 def f(x):
241 def f(x):
242 try:
242 try:
243 st = os.lstat(self._join(x))
243 st = os.lstat(self._join(x))
244 if util.statislink(st):
244 if util.statislink(st):
245 return 'l'
245 return 'l'
246 if util.statisexec(st):
246 if util.statisexec(st):
247 return 'x'
247 return 'x'
248 except OSError:
248 except OSError:
249 pass
249 pass
250 return ''
250 return ''
251 return f
251 return f
252
252
253 fallback = buildfallback()
253 fallback = buildfallback()
254 if self._checklink:
254 if self._checklink:
255 def f(x):
255 def f(x):
256 if os.path.islink(self._join(x)):
256 if os.path.islink(self._join(x)):
257 return 'l'
257 return 'l'
258 if 'x' in fallback(x):
258 if 'x' in fallback(x):
259 return 'x'
259 return 'x'
260 return ''
260 return ''
261 return f
261 return f
262 if self._checkexec:
262 if self._checkexec:
263 def f(x):
263 def f(x):
264 if 'l' in fallback(x):
264 if 'l' in fallback(x):
265 return 'l'
265 return 'l'
266 if util.isexec(self._join(x)):
266 if util.isexec(self._join(x)):
267 return 'x'
267 return 'x'
268 return ''
268 return ''
269 return f
269 return f
270 else:
270 else:
271 return fallback
271 return fallback
272
272
273 @propertycache
273 @propertycache
274 def _cwd(self):
274 def _cwd(self):
275 return os.getcwd()
275 return os.getcwd()
276
276
277 def getcwd(self):
277 def getcwd(self):
278 '''Return the path from which a canonical path is calculated.
278 '''Return the path from which a canonical path is calculated.
279
279
280 This path should be used to resolve file patterns or to convert
280 This path should be used to resolve file patterns or to convert
281 canonical paths back to file paths for display. It shouldn't be
281 canonical paths back to file paths for display. It shouldn't be
282 used to get real file paths. Use vfs functions instead.
282 used to get real file paths. Use vfs functions instead.
283 '''
283 '''
284 cwd = self._cwd
284 cwd = self._cwd
285 if cwd == self._root:
285 if cwd == self._root:
286 return ''
286 return ''
287 # self._root ends with a path separator if self._root is '/' or 'C:\'
287 # self._root ends with a path separator if self._root is '/' or 'C:\'
288 rootsep = self._root
288 rootsep = self._root
289 if not util.endswithsep(rootsep):
289 if not util.endswithsep(rootsep):
290 rootsep += os.sep
290 rootsep += os.sep
291 if cwd.startswith(rootsep):
291 if cwd.startswith(rootsep):
292 return cwd[len(rootsep):]
292 return cwd[len(rootsep):]
293 else:
293 else:
294 # we're outside the repo. return an absolute path.
294 # we're outside the repo. return an absolute path.
295 return cwd
295 return cwd
296
296
297 def pathto(self, f, cwd=None):
297 def pathto(self, f, cwd=None):
298 if cwd is None:
298 if cwd is None:
299 cwd = self.getcwd()
299 cwd = self.getcwd()
300 path = util.pathto(self._root, cwd, f)
300 path = util.pathto(self._root, cwd, f)
301 if self._slash:
301 if self._slash:
302 return util.pconvert(path)
302 return util.pconvert(path)
303 return path
303 return path
304
304
305 def __getitem__(self, key):
305 def __getitem__(self, key):
306 '''Return the current state of key (a filename) in the dirstate.
306 '''Return the current state of key (a filename) in the dirstate.
307
307
308 States are:
308 States are:
309 n normal
309 n normal
310 m needs merging
310 m needs merging
311 r marked for removal
311 r marked for removal
312 a marked for addition
312 a marked for addition
313 ? not tracked
313 ? not tracked
314 '''
314 '''
315 return self._map.get(key, ("?",))[0]
315 return self._map.get(key, ("?",))[0]
316
316
317 def __contains__(self, key):
317 def __contains__(self, key):
318 return key in self._map
318 return key in self._map
319
319
320 def __iter__(self):
320 def __iter__(self):
321 for x in sorted(self._map):
321 for x in sorted(self._map):
322 yield x
322 yield x
323
323
324 def iteritems(self):
324 def iteritems(self):
325 return self._map.iteritems()
325 return self._map.iteritems()
326
326
327 def parents(self):
327 def parents(self):
328 return [self._validate(p) for p in self._pl]
328 return [self._validate(p) for p in self._pl]
329
329
330 def p1(self):
330 def p1(self):
331 return self._validate(self._pl[0])
331 return self._validate(self._pl[0])
332
332
333 def p2(self):
333 def p2(self):
334 return self._validate(self._pl[1])
334 return self._validate(self._pl[1])
335
335
336 def branch(self):
336 def branch(self):
337 return encoding.tolocal(self._branch)
337 return encoding.tolocal(self._branch)
338
338
339 def setparents(self, p1, p2=nullid):
339 def setparents(self, p1, p2=nullid):
340 """Set dirstate parents to p1 and p2.
340 """Set dirstate parents to p1 and p2.
341
341
342 When moving from two parents to one, 'm' merged entries are
342 When moving from two parents to one, 'm' merged entries are
343 adjusted to normal and previous copy records discarded and
343 adjusted to normal and previous copy records discarded and
344 returned by the call.
344 returned by the call.
345
345
346 See localrepo.setparents()
346 See localrepo.setparents()
347 """
347 """
348 if self._parentwriters == 0:
348 if self._parentwriters == 0:
349 raise ValueError("cannot set dirstate parent without "
349 raise ValueError("cannot set dirstate parent without "
350 "calling dirstate.beginparentchange")
350 "calling dirstate.beginparentchange")
351
351
352 self._dirty = self._dirtypl = True
352 self._dirty = self._dirtypl = True
353 oldp2 = self._pl[1]
353 oldp2 = self._pl[1]
354 self._pl = p1, p2
354 self._pl = p1, p2
355 copies = {}
355 copies = {}
356 if oldp2 != nullid and p2 == nullid:
356 if oldp2 != nullid and p2 == nullid:
357 for f, s in self._map.iteritems():
357 for f, s in self._map.iteritems():
358 # Discard 'm' markers when moving away from a merge state
358 # Discard 'm' markers when moving away from a merge state
359 if s[0] == 'm':
359 if s[0] == 'm':
360 if f in self._copymap:
360 if f in self._copymap:
361 copies[f] = self._copymap[f]
361 copies[f] = self._copymap[f]
362 self.normallookup(f)
362 self.normallookup(f)
363 # Also fix up otherparent markers
363 # Also fix up otherparent markers
364 elif s[0] == 'n' and s[2] == -2:
364 elif s[0] == 'n' and s[2] == -2:
365 if f in self._copymap:
365 if f in self._copymap:
366 copies[f] = self._copymap[f]
366 copies[f] = self._copymap[f]
367 self.add(f)
367 self.add(f)
368 return copies
368 return copies
369
369
370 def setbranch(self, branch):
370 def setbranch(self, branch):
371 self._branch = encoding.fromlocal(branch)
371 self._branch = encoding.fromlocal(branch)
372 f = self._opener('branch', 'w', atomictemp=True)
372 f = self._opener('branch', 'w', atomictemp=True)
373 try:
373 try:
374 f.write(self._branch + '\n')
374 f.write(self._branch + '\n')
375 f.close()
375 f.close()
376
376
377 # make sure filecache has the correct stat info for _branch after
377 # make sure filecache has the correct stat info for _branch after
378 # replacing the underlying file
378 # replacing the underlying file
379 ce = self._filecache['_branch']
379 ce = self._filecache['_branch']
380 if ce:
380 if ce:
381 ce.refresh()
381 ce.refresh()
382 except: # re-raises
382 except: # re-raises
383 f.discard()
383 f.discard()
384 raise
384 raise
385
385
386 def _opendirstatefile(self):
386 def _opendirstatefile(self):
387 fp, mode = _trypending(self._root, self._opener, self._filename)
387 fp, mode = _trypending(self._root, self._opener, self._filename)
388 if self._pendingmode is not None and self._pendingmode != mode:
388 if self._pendingmode is not None and self._pendingmode != mode:
389 fp.close()
389 fp.close()
390 raise error.Abort(_('working directory state may be '
390 raise error.Abort(_('working directory state may be '
391 'changed parallelly'))
391 'changed parallelly'))
392 self._pendingmode = mode
392 self._pendingmode = mode
393 return fp
393 return fp
394
394
395 def _read(self):
395 def _read(self):
396 self._map = {}
396 self._map = {}
397 self._copymap = {}
397 self._copymap = {}
398 try:
398 try:
399 fp = self._opendirstatefile()
399 fp = self._opendirstatefile()
400 try:
400 try:
401 st = fp.read()
401 st = fp.read()
402 finally:
402 finally:
403 fp.close()
403 fp.close()
404 except IOError as err:
404 except IOError as err:
405 if err.errno != errno.ENOENT:
405 if err.errno != errno.ENOENT:
406 raise
406 raise
407 return
407 return
408 if not st:
408 if not st:
409 return
409 return
410
410
411 if util.safehasattr(parsers, 'dict_new_presized'):
411 if util.safehasattr(parsers, 'dict_new_presized'):
412 # Make an estimate of the number of files in the dirstate based on
412 # Make an estimate of the number of files in the dirstate based on
413 # its size. From a linear regression on a set of real-world repos,
413 # its size. From a linear regression on a set of real-world repos,
414 # all over 10,000 files, the size of a dirstate entry is 85
414 # all over 10,000 files, the size of a dirstate entry is 85
415 # bytes. The cost of resizing is significantly higher than the cost
415 # bytes. The cost of resizing is significantly higher than the cost
416 # of filling in a larger presized dict, so subtract 20% from the
416 # of filling in a larger presized dict, so subtract 20% from the
417 # size.
417 # size.
418 #
418 #
419 # This heuristic is imperfect in many ways, so in a future dirstate
419 # This heuristic is imperfect in many ways, so in a future dirstate
420 # format update it makes sense to just record the number of entries
420 # format update it makes sense to just record the number of entries
421 # on write.
421 # on write.
422 self._map = parsers.dict_new_presized(len(st) / 71)
422 self._map = parsers.dict_new_presized(len(st) / 71)
423
423
424 # Python's garbage collector triggers a GC each time a certain number
424 # Python's garbage collector triggers a GC each time a certain number
425 # of container objects (the number being defined by
425 # of container objects (the number being defined by
426 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
426 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
427 # for each file in the dirstate. The C version then immediately marks
427 # for each file in the dirstate. The C version then immediately marks
428 # them as not to be tracked by the collector. However, this has no
428 # them as not to be tracked by the collector. However, this has no
429 # effect on when GCs are triggered, only on what objects the GC looks
429 # effect on when GCs are triggered, only on what objects the GC looks
430 # into. This means that O(number of files) GCs are unavoidable.
430 # into. This means that O(number of files) GCs are unavoidable.
431 # Depending on when in the process's lifetime the dirstate is parsed,
431 # Depending on when in the process's lifetime the dirstate is parsed,
432 # this can get very expensive. As a workaround, disable GC while
432 # this can get very expensive. As a workaround, disable GC while
433 # parsing the dirstate.
433 # parsing the dirstate.
434 #
434 #
435 # (we cannot decorate the function directly since it is in a C module)
435 # (we cannot decorate the function directly since it is in a C module)
436 parse_dirstate = util.nogc(parsers.parse_dirstate)
436 parse_dirstate = util.nogc(parsers.parse_dirstate)
437 p = parse_dirstate(self._map, self._copymap, st)
437 p = parse_dirstate(self._map, self._copymap, st)
438 if not self._dirtypl:
438 if not self._dirtypl:
439 self._pl = p
439 self._pl = p
440
440
441 def invalidate(self):
441 def invalidate(self):
442 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
442 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
443 "_pl", "_dirs", "_ignore"):
443 "_pl", "_dirs", "_ignore", "_nonnormalset"):
444 if a in self.__dict__:
444 if a in self.__dict__:
445 delattr(self, a)
445 delattr(self, a)
446 self._lastnormaltime = 0
446 self._lastnormaltime = 0
447 self._dirty = False
447 self._dirty = False
448 self._parentwriters = 0
448 self._parentwriters = 0
449
449
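Adding '_nonnormalset' to this tuple (new line 443) matters because the set is a cached property: when invalidate() throws away the cached _map, a stale set must not survive the reload. A small, purely illustrative demonstration of the inconsistency that would otherwise be possible:

    dmap = {'f': ('r', 0, 0, 0)}                  # map before the reload
    nonnormal = {'f'}                             # cached set derived from it
    dmap = {'f': ('n', 0o644, 3, 1450000000)}     # map re-read after invalidate()
    # if the cached set survived the reload, it would now disagree with the map:
    assert nonnormal != set(n for n, e in dmap.items()
                            if e[0] != 'n' or e[3] == -1)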
450 def copy(self, source, dest):
450 def copy(self, source, dest):
451 """Mark dest as a copy of source. Unmark dest if source is None."""
451 """Mark dest as a copy of source. Unmark dest if source is None."""
452 if source == dest:
452 if source == dest:
453 return
453 return
454 self._dirty = True
454 self._dirty = True
455 if source is not None:
455 if source is not None:
456 self._copymap[dest] = source
456 self._copymap[dest] = source
457 elif dest in self._copymap:
457 elif dest in self._copymap:
458 del self._copymap[dest]
458 del self._copymap[dest]
459
459
460 def copied(self, file):
460 def copied(self, file):
461 return self._copymap.get(file, None)
461 return self._copymap.get(file, None)
462
462
463 def copies(self):
463 def copies(self):
464 return self._copymap
464 return self._copymap
465
465
466 def _droppath(self, f):
466 def _droppath(self, f):
467 if self[f] not in "?r" and "_dirs" in self.__dict__:
467 if self[f] not in "?r" and "_dirs" in self.__dict__:
468 self._dirs.delpath(f)
468 self._dirs.delpath(f)
469
469
470 if "_filefoldmap" in self.__dict__:
470 if "_filefoldmap" in self.__dict__:
471 normed = util.normcase(f)
471 normed = util.normcase(f)
472 if normed in self._filefoldmap:
472 if normed in self._filefoldmap:
473 del self._filefoldmap[normed]
473 del self._filefoldmap[normed]
474
474
475 def _addpath(self, f, state, mode, size, mtime):
475 def _addpath(self, f, state, mode, size, mtime):
476 oldstate = self[f]
476 oldstate = self[f]
477 if state == 'a' or oldstate == 'r':
477 if state == 'a' or oldstate == 'r':
478 scmutil.checkfilename(f)
478 scmutil.checkfilename(f)
479 if f in self._dirs:
479 if f in self._dirs:
480 raise error.Abort(_('directory %r already in dirstate') % f)
480 raise error.Abort(_('directory %r already in dirstate') % f)
481 # shadows
481 # shadows
482 for d in util.finddirs(f):
482 for d in util.finddirs(f):
483 if d in self._dirs:
483 if d in self._dirs:
484 break
484 break
485 if d in self._map and self[d] != 'r':
485 if d in self._map and self[d] != 'r':
486 raise error.Abort(
486 raise error.Abort(
487 _('file %r in dirstate clashes with %r') % (d, f))
487 _('file %r in dirstate clashes with %r') % (d, f))
488 if oldstate in "?r" and "_dirs" in self.__dict__:
488 if oldstate in "?r" and "_dirs" in self.__dict__:
489 self._dirs.addpath(f)
489 self._dirs.addpath(f)
490 self._dirty = True
490 self._dirty = True
491 self._map[f] = dirstatetuple(state, mode, size, mtime)
491 self._map[f] = dirstatetuple(state, mode, size, mtime)
492 if state != 'n' or mtime == -1:
493 self._nonnormalset.add(f)
492
494
493 def normal(self, f):
495 def normal(self, f):
494 '''Mark a file normal and clean.'''
496 '''Mark a file normal and clean.'''
495 s = os.lstat(self._join(f))
497 s = os.lstat(self._join(f))
496 mtime = s.st_mtime
498 mtime = s.st_mtime
497 self._addpath(f, 'n', s.st_mode,
499 self._addpath(f, 'n', s.st_mode,
498 s.st_size & _rangemask, mtime & _rangemask)
500 s.st_size & _rangemask, mtime & _rangemask)
499 if f in self._copymap:
501 if f in self._copymap:
500 del self._copymap[f]
502 del self._copymap[f]
503 if f in self._nonnormalset:
504 self._nonnormalset.remove(f)
501 if mtime > self._lastnormaltime:
505 if mtime > self._lastnormaltime:
502 # Remember the most recent modification timeslot for status(),
506 # Remember the most recent modification timeslot for status(),
503 # to make sure we won't miss future size-preserving file content
507 # to make sure we won't miss future size-preserving file content
504 # modifications that happen within the same timeslot.
508 # modifications that happen within the same timeslot.
505 self._lastnormaltime = mtime
509 self._lastnormaltime = mtime
506
510
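Marking a file clean gives it state 'n' and a real (masked) mtime, so it no longer satisfies the non-normal predicate; the two new lines (503-504) discard any earlier membership. A hedged sketch of the effect, with illustrative names rather than the dirstate API:

    def mark_normal(dmap, nonnormal, f, mode, size, mtime):
        # the masked mtime of a real file is never -1, so the entry is normal
        dmap[f] = ('n', mode, size, mtime & 0x7fffffff)
        nonnormal.discard(f)

    dmap, nonnormal = {'f': ('n', 0o644, 3, -1)}, {'f'}   # was pending lookup
    mark_normal(dmap, nonnormal, 'f', 0o644, 3, 1450000000)
    assert nonnormal == set()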
507 def normallookup(self, f):
511 def normallookup(self, f):
508 '''Mark a file normal, but possibly dirty.'''
512 '''Mark a file normal, but possibly dirty.'''
509 if self._pl[1] != nullid and f in self._map:
513 if self._pl[1] != nullid and f in self._map:
510 # if there is a merge going on and the file was either
514 # if there is a merge going on and the file was either
511 # in state 'm' (-1) or coming from other parent (-2) before
515 # in state 'm' (-1) or coming from other parent (-2) before
512 # being removed, restore that state.
516 # being removed, restore that state.
513 entry = self._map[f]
517 entry = self._map[f]
514 if entry[0] == 'r' and entry[2] in (-1, -2):
518 if entry[0] == 'r' and entry[2] in (-1, -2):
515 source = self._copymap.get(f)
519 source = self._copymap.get(f)
516 if entry[2] == -1:
520 if entry[2] == -1:
517 self.merge(f)
521 self.merge(f)
518 elif entry[2] == -2:
522 elif entry[2] == -2:
519 self.otherparent(f)
523 self.otherparent(f)
520 if source:
524 if source:
521 self.copy(source, f)
525 self.copy(source, f)
522 return
526 return
523 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
527 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
524 return
528 return
525 self._addpath(f, 'n', 0, -1, -1)
529 self._addpath(f, 'n', 0, -1, -1)
526 if f in self._copymap:
530 if f in self._copymap:
527 del self._copymap[f]
531 del self._copymap[f]
532 if f in self._nonnormalset:
533 self._nonnormalset.remove(f)
528
534
529 def otherparent(self, f):
535 def otherparent(self, f):
530 '''Mark as coming from the other parent, always dirty.'''
536 '''Mark as coming from the other parent, always dirty.'''
531 if self._pl[1] == nullid:
537 if self._pl[1] == nullid:
532 raise error.Abort(_("setting %r to other parent "
538 raise error.Abort(_("setting %r to other parent "
533 "only allowed in merges") % f)
539 "only allowed in merges") % f)
534 if f in self and self[f] == 'n':
540 if f in self and self[f] == 'n':
535 # merge-like
541 # merge-like
536 self._addpath(f, 'm', 0, -2, -1)
542 self._addpath(f, 'm', 0, -2, -1)
537 else:
543 else:
538 # add-like
544 # add-like
539 self._addpath(f, 'n', 0, -2, -1)
545 self._addpath(f, 'n', 0, -2, -1)
540
546
541 if f in self._copymap:
547 if f in self._copymap:
542 del self._copymap[f]
548 del self._copymap[f]
543
549
544 def add(self, f):
550 def add(self, f):
545 '''Mark a file added.'''
551 '''Mark a file added.'''
546 self._addpath(f, 'a', 0, -1, -1)
552 self._addpath(f, 'a', 0, -1, -1)
547 if f in self._copymap:
553 if f in self._copymap:
548 del self._copymap[f]
554 del self._copymap[f]
549
555
550 def remove(self, f):
556 def remove(self, f):
551 '''Mark a file removed.'''
557 '''Mark a file removed.'''
552 self._dirty = True
558 self._dirty = True
553 self._droppath(f)
559 self._droppath(f)
554 size = 0
560 size = 0
555 if self._pl[1] != nullid and f in self._map:
561 if self._pl[1] != nullid and f in self._map:
556 # backup the previous state
562 # backup the previous state
557 entry = self._map[f]
563 entry = self._map[f]
558 if entry[0] == 'm': # merge
564 if entry[0] == 'm': # merge
559 size = -1
565 size = -1
560 elif entry[0] == 'n' and entry[2] == -2: # other parent
566 elif entry[0] == 'n' and entry[2] == -2: # other parent
561 size = -2
567 size = -2
562 self._map[f] = dirstatetuple('r', 0, size, 0)
568 self._map[f] = dirstatetuple('r', 0, size, 0)
569 self._nonnormalset.add(f)
563 if size == 0 and f in self._copymap:
570 if size == 0 and f in self._copymap:
564 del self._copymap[f]
571 del self._copymap[f]
565
572
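A removed file keeps an entry in the map (state 'r') so the removal can be committed, and an 'r' state always fails the e[0] != 'n' test, hence the unconditional add at new line 569. Roughly, with illustrative names:

    def mark_removed(dmap, nonnormal, f, size=0):
        dmap[f] = ('r', 0, size, 0)
        nonnormal.add(f)              # 'r' entries are non-normal by definition

    dmap, nonnormal = {'f': ('n', 0o644, 3, 1450000000)}, set()
    mark_removed(dmap, nonnormal, 'f')
    assert 'f' in nonnormal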
566 def merge(self, f):
573 def merge(self, f):
567 '''Mark a file merged.'''
574 '''Mark a file merged.'''
568 if self._pl[1] == nullid:
575 if self._pl[1] == nullid:
569 return self.normallookup(f)
576 return self.normallookup(f)
570 return self.otherparent(f)
577 return self.otherparent(f)
571
578
572 def drop(self, f):
579 def drop(self, f):
573 '''Drop a file from the dirstate'''
580 '''Drop a file from the dirstate'''
574 if f in self._map:
581 if f in self._map:
575 self._dirty = True
582 self._dirty = True
576 self._droppath(f)
583 self._droppath(f)
577 del self._map[f]
584 del self._map[f]
585 if f in self._nonnormalset:
586 self._nonnormalset.remove(f)
578
587
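drop() is the counterpart of remove(): instead of recording an 'r' entry it forgets the file entirely, so new lines 585-586 take the file out of the non-normal set as well as the map. A small sketch of the contrast (hypothetical helper, not a dirstate method):

    def drop_entry(dmap, nonnormal, f):
        if f in dmap:
            del dmap[f]
            nonnormal.discard(f)      # no entry left to be non-normal

    dmap, nonnormal = {'f': ('r', 0, 0, 0)}, {'f'}
    drop_entry(dmap, nonnormal, 'f')
    assert dmap == {} and nonnormal == set()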
579 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
588 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
580 if exists is None:
589 if exists is None:
581 exists = os.path.lexists(os.path.join(self._root, path))
590 exists = os.path.lexists(os.path.join(self._root, path))
582 if not exists:
591 if not exists:
583 # Maybe a path component exists
592 # Maybe a path component exists
584 if not ignoremissing and '/' in path:
593 if not ignoremissing and '/' in path:
585 d, f = path.rsplit('/', 1)
594 d, f = path.rsplit('/', 1)
586 d = self._normalize(d, False, ignoremissing, None)
595 d = self._normalize(d, False, ignoremissing, None)
587 folded = d + "/" + f
596 folded = d + "/" + f
588 else:
597 else:
589 # No path components, preserve original case
598 # No path components, preserve original case
590 folded = path
599 folded = path
591 else:
600 else:
592 # recursively normalize leading directory components
601 # recursively normalize leading directory components
593 # against dirstate
602 # against dirstate
594 if '/' in normed:
603 if '/' in normed:
595 d, f = normed.rsplit('/', 1)
604 d, f = normed.rsplit('/', 1)
596 d = self._normalize(d, False, ignoremissing, True)
605 d = self._normalize(d, False, ignoremissing, True)
597 r = self._root + "/" + d
606 r = self._root + "/" + d
598 folded = d + "/" + util.fspath(f, r)
607 folded = d + "/" + util.fspath(f, r)
599 else:
608 else:
600 folded = util.fspath(normed, self._root)
609 folded = util.fspath(normed, self._root)
601 storemap[normed] = folded
610 storemap[normed] = folded
602
611
603 return folded
612 return folded
604
613
605 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
614 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
606 normed = util.normcase(path)
615 normed = util.normcase(path)
607 folded = self._filefoldmap.get(normed, None)
616 folded = self._filefoldmap.get(normed, None)
608 if folded is None:
617 if folded is None:
609 if isknown:
618 if isknown:
610 folded = path
619 folded = path
611 else:
620 else:
612 folded = self._discoverpath(path, normed, ignoremissing, exists,
621 folded = self._discoverpath(path, normed, ignoremissing, exists,
613 self._filefoldmap)
622 self._filefoldmap)
614 return folded
623 return folded
615
624
616 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
625 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
617 normed = util.normcase(path)
626 normed = util.normcase(path)
618 folded = self._filefoldmap.get(normed, None)
627 folded = self._filefoldmap.get(normed, None)
619 if folded is None:
628 if folded is None:
620 folded = self._dirfoldmap.get(normed, None)
629 folded = self._dirfoldmap.get(normed, None)
621 if folded is None:
630 if folded is None:
622 if isknown:
631 if isknown:
623 folded = path
632 folded = path
624 else:
633 else:
625 # store discovered result in dirfoldmap so that future
634 # store discovered result in dirfoldmap so that future
626 # normalizefile calls don't start matching directories
635 # normalizefile calls don't start matching directories
627 folded = self._discoverpath(path, normed, ignoremissing, exists,
636 folded = self._discoverpath(path, normed, ignoremissing, exists,
628 self._dirfoldmap)
637 self._dirfoldmap)
629 return folded
638 return folded
630
639
631 def normalize(self, path, isknown=False, ignoremissing=False):
640 def normalize(self, path, isknown=False, ignoremissing=False):
632 '''
641 '''
633 normalize the case of a pathname when on a casefolding filesystem
642 normalize the case of a pathname when on a casefolding filesystem
634
643
635 isknown specifies whether the filename came from walking the
644 isknown specifies whether the filename came from walking the
636 disk, to avoid extra filesystem access.
645 disk, to avoid extra filesystem access.
637
646
638 If ignoremissing is True, missing paths are returned
647 If ignoremissing is True, missing paths are returned
639 unchanged. Otherwise, we try harder to normalize possibly
648 unchanged. Otherwise, we try harder to normalize possibly
640 existing path components.
649 existing path components.
641
650
642 The normalized case is determined based on the following precedence:
651 The normalized case is determined based on the following precedence:
643
652
644 - version of name already stored in the dirstate
653 - version of name already stored in the dirstate
645 - version of name stored on disk
654 - version of name stored on disk
646 - version provided via command arguments
655 - version provided via command arguments
647 '''
656 '''
648
657
649 if self._checkcase:
658 if self._checkcase:
650 return self._normalize(path, isknown, ignoremissing)
659 return self._normalize(path, isknown, ignoremissing)
651 return path
660 return path
652
661
653 def clear(self):
662 def clear(self):
654 self._map = {}
663 self._map = {}
664 self._nonnormalset = set()
655 if "_dirs" in self.__dict__:
665 if "_dirs" in self.__dict__:
656 delattr(self, "_dirs")
666 delattr(self, "_dirs")
657 self._copymap = {}
667 self._copymap = {}
658 self._pl = [nullid, nullid]
668 self._pl = [nullid, nullid]
659 self._lastnormaltime = 0
669 self._lastnormaltime = 0
660 self._dirty = True
670 self._dirty = True
661
671
662 def rebuild(self, parent, allfiles, changedfiles=None):
672 def rebuild(self, parent, allfiles, changedfiles=None):
663 if changedfiles is None:
673 if changedfiles is None:
664 # Rebuild entire dirstate
674 # Rebuild entire dirstate
665 changedfiles = allfiles
675 changedfiles = allfiles
666 lastnormaltime = self._lastnormaltime
676 lastnormaltime = self._lastnormaltime
667 self.clear()
677 self.clear()
668 self._lastnormaltime = lastnormaltime
678 self._lastnormaltime = lastnormaltime
669
679
670 for f in changedfiles:
680 for f in changedfiles:
671 mode = 0o666
681 mode = 0o666
672 if f in allfiles and 'x' in allfiles.flags(f):
682 if f in allfiles and 'x' in allfiles.flags(f):
673 mode = 0o777
683 mode = 0o777
674
684
675 if f in allfiles:
685 if f in allfiles:
676 self._map[f] = dirstatetuple('n', mode, -1, 0)
686 self._map[f] = dirstatetuple('n', mode, -1, 0)
677 else:
687 else:
678 self._map.pop(f, None)
688 self._map.pop(f, None)
689 if f in self._nonnormalset:
690 self._nonnormalset.remove(f)
679
691
680 self._pl = (parent, nullid)
692 self._pl = (parent, nullid)
681 self._dirty = True
693 self._dirty = True
682
694
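rebuild() starts from clear(), which (new line 664) resets _nonnormalset to an empty set along with the map, and then refills the map: files still in allfiles get a fresh 'n' entry written straight into _map, while files that are gone are popped and, with new lines 689-690, dropped from the set too. A sketch of the per-file step, illustrative only:

    def rebuild_one(dmap, nonnormal, f, in_allfiles, mode=0o666):
        if in_allfiles:
            dmap[f] = ('n', mode, -1, 0)   # size -1, mtime 0, as in rebuild above
        else:
            dmap.pop(f, None)
            nonnormal.discard(f)

    dmap, nonnormal = {'gone': ('n', 0o644, 3, -1)}, {'gone'}
    rebuild_one(dmap, nonnormal, 'gone', in_allfiles=False)
    assert 'gone' not in dmap and 'gone' not in nonnormal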
683 def write(self, tr=False):
695 def write(self, tr=False):
684 if not self._dirty:
696 if not self._dirty:
685 return
697 return
686
698
687 filename = self._filename
699 filename = self._filename
688 if tr is False: # not explicitly specified
700 if tr is False: # not explicitly specified
689 if (self._ui.configbool('devel', 'all-warnings')
701 if (self._ui.configbool('devel', 'all-warnings')
690 or self._ui.configbool('devel', 'check-dirstate-write')):
702 or self._ui.configbool('devel', 'check-dirstate-write')):
691 self._ui.develwarn('use dirstate.write with '
703 self._ui.develwarn('use dirstate.write with '
692 'repo.currenttransaction()')
704 'repo.currenttransaction()')
693
705
694 if self._opener.lexists(self._pendingfilename):
706 if self._opener.lexists(self._pendingfilename):
695 # if pending file already exists, in-memory changes
707 # if pending file already exists, in-memory changes
696 # should be written into it, because it has priority
708 # should be written into it, because it has priority
697 # to '.hg/dirstate' at reading under HG_PENDING mode
709 # to '.hg/dirstate' at reading under HG_PENDING mode
698 filename = self._pendingfilename
710 filename = self._pendingfilename
699 elif tr:
711 elif tr:
700 # 'dirstate.write()' is not only for writing in-memory
712 # 'dirstate.write()' is not only for writing in-memory
701 # changes out, but also for dropping ambiguous timestamp.
713 # changes out, but also for dropping ambiguous timestamp.
702 # delayed writing re-raise "ambiguous timestamp issue".
714 # delayed writing re-raise "ambiguous timestamp issue".
703 # See also the wiki page below for detail:
715 # See also the wiki page below for detail:
704 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
716 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
705
717
706 # emulate dropping timestamp in 'parsers.pack_dirstate'
718 # emulate dropping timestamp in 'parsers.pack_dirstate'
707 now = _getfsnow(self._opener)
719 now = _getfsnow(self._opener)
708 dmap = self._map
720 dmap = self._map
709 for f, e in dmap.iteritems():
721 for f, e in dmap.iteritems():
710 if e[0] == 'n' and e[3] == now:
722 if e[0] == 'n' and e[3] == now:
711 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
723 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
724 self._nonnormalset.add(f)
712
725
713 # emulate that all 'dirstate.normal' results are written out
726 # emulate that all 'dirstate.normal' results are written out
714 self._lastnormaltime = 0
727 self._lastnormaltime = 0
715
728
716 # delay writing in-memory changes out
729 # delay writing in-memory changes out
717 tr.addfilegenerator('dirstate', (self._filename,),
730 tr.addfilegenerator('dirstate', (self._filename,),
718 self._writedirstate, location='plain')
731 self._writedirstate, location='plain')
719 return
732 return
720
733
721 st = self._opener(filename, "w", atomictemp=True)
734 st = self._opener(filename, "w", atomictemp=True)
722 self._writedirstate(st)
735 self._writedirstate(st)
723
736
724 def _writedirstate(self, st):
737 def _writedirstate(self, st):
725 # use the modification time of the newly created temporary file as the
738 # use the modification time of the newly created temporary file as the
726 # filesystem's notion of 'now'
739 # filesystem's notion of 'now'
727 now = util.fstat(st).st_mtime & _rangemask
740 now = util.fstat(st).st_mtime & _rangemask
728
741
729 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
742 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
730 # timestamp of each entry in dirstate, because of 'now > mtime'
743 # timestamp of each entry in dirstate, because of 'now > mtime'
731 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
744 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
732 if delaywrite > 0:
745 if delaywrite > 0:
733 # do we have any files to delay for?
746 # do we have any files to delay for?
734 for f, e in self._map.iteritems():
747 for f, e in self._map.iteritems():
735 if e[0] == 'n' and e[3] == now:
748 if e[0] == 'n' and e[3] == now:
736 import time # to avoid useless import
749 import time # to avoid useless import
737 # rather than sleep n seconds, sleep until the next
750 # rather than sleep n seconds, sleep until the next
738 # multiple of n seconds
751 # multiple of n seconds
739 clock = time.time()
752 clock = time.time()
740 start = int(clock) - (int(clock) % delaywrite)
753 start = int(clock) - (int(clock) % delaywrite)
741 end = start + delaywrite
754 end = start + delaywrite
742 time.sleep(end - clock)
755 time.sleep(end - clock)
743 break
756 break
744
757
745 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
758 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
759 self._nonnormalset = nonnormalentries(self._map)
746 st.close()
760 st.close()
747 self._lastnormaltime = 0
761 self._lastnormaltime = 0
748 self._dirty = self._dirtypl = False
762 self._dirty = self._dirtypl = False
749
763
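pack_dirstate() may rewrite entries while serializing them: a clean entry whose mtime equals the filesystem "now" is stored with mtime -1, because a later modification within the same second would be invisible to a size-plus-mtime comparison. Since that happens inside the packer, behind the back of the incremental updates, new line 759 simply recomputes the set from the packed map; the delayed-write path in write() makes the equivalent adjustment in Python and adds each affected file to the set directly (new line 724). A hedged sketch of the timestamp dropping, simplified and not the real parsers.pack_dirstate:

    def drop_ambiguous_mtime(e, now):
        state, mode, size, mtime = e
        if state == 'n' and mtime == now:
            return (state, mode, size, -1)    # force a content check next time
        return e

    now = 1450000000
    e = drop_ambiguous_mtime(('n', 0o644, 3, now), now)
    assert e[3] == -1     # the entry now belongs in nonnormalentries()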
750 def _dirignore(self, f):
764 def _dirignore(self, f):
751 if f == '.':
765 if f == '.':
752 return False
766 return False
753 if self._ignore(f):
767 if self._ignore(f):
754 return True
768 return True
755 for p in util.finddirs(f):
769 for p in util.finddirs(f):
756 if self._ignore(p):
770 if self._ignore(p):
757 return True
771 return True
758 return False
772 return False
759
773
760 def _walkexplicit(self, match, subrepos):
774 def _walkexplicit(self, match, subrepos):
761 '''Get stat data about the files explicitly specified by match.
775 '''Get stat data about the files explicitly specified by match.
762
776
763 Return a triple (results, dirsfound, dirsnotfound).
777 Return a triple (results, dirsfound, dirsnotfound).
764 - results is a mapping from filename to stat result. It also contains
778 - results is a mapping from filename to stat result. It also contains
765 listings mapping subrepos and .hg to None.
779 listings mapping subrepos and .hg to None.
766 - dirsfound is a list of files found to be directories.
780 - dirsfound is a list of files found to be directories.
767 - dirsnotfound is a list of files that the dirstate thinks are
781 - dirsnotfound is a list of files that the dirstate thinks are
768 directories and that were not found.'''
782 directories and that were not found.'''
769
783
770 def badtype(mode):
784 def badtype(mode):
771 kind = _('unknown')
785 kind = _('unknown')
772 if stat.S_ISCHR(mode):
786 if stat.S_ISCHR(mode):
773 kind = _('character device')
787 kind = _('character device')
774 elif stat.S_ISBLK(mode):
788 elif stat.S_ISBLK(mode):
775 kind = _('block device')
789 kind = _('block device')
776 elif stat.S_ISFIFO(mode):
790 elif stat.S_ISFIFO(mode):
777 kind = _('fifo')
791 kind = _('fifo')
778 elif stat.S_ISSOCK(mode):
792 elif stat.S_ISSOCK(mode):
779 kind = _('socket')
793 kind = _('socket')
780 elif stat.S_ISDIR(mode):
794 elif stat.S_ISDIR(mode):
781 kind = _('directory')
795 kind = _('directory')
782 return _('unsupported file type (type is %s)') % kind
796 return _('unsupported file type (type is %s)') % kind
783
797
784 matchedir = match.explicitdir
798 matchedir = match.explicitdir
785 badfn = match.bad
799 badfn = match.bad
786 dmap = self._map
800 dmap = self._map
787 lstat = os.lstat
801 lstat = os.lstat
788 getkind = stat.S_IFMT
802 getkind = stat.S_IFMT
789 dirkind = stat.S_IFDIR
803 dirkind = stat.S_IFDIR
790 regkind = stat.S_IFREG
804 regkind = stat.S_IFREG
791 lnkkind = stat.S_IFLNK
805 lnkkind = stat.S_IFLNK
792 join = self._join
806 join = self._join
793 dirsfound = []
807 dirsfound = []
794 foundadd = dirsfound.append
808 foundadd = dirsfound.append
795 dirsnotfound = []
809 dirsnotfound = []
796 notfoundadd = dirsnotfound.append
810 notfoundadd = dirsnotfound.append
797
811
798 if not match.isexact() and self._checkcase:
812 if not match.isexact() and self._checkcase:
799 normalize = self._normalize
813 normalize = self._normalize
800 else:
814 else:
801 normalize = None
815 normalize = None
802
816
803 files = sorted(match.files())
817 files = sorted(match.files())
804 subrepos.sort()
818 subrepos.sort()
805 i, j = 0, 0
819 i, j = 0, 0
806 while i < len(files) and j < len(subrepos):
820 while i < len(files) and j < len(subrepos):
807 subpath = subrepos[j] + "/"
821 subpath = subrepos[j] + "/"
808 if files[i] < subpath:
822 if files[i] < subpath:
809 i += 1
823 i += 1
810 continue
824 continue
811 while i < len(files) and files[i].startswith(subpath):
825 while i < len(files) and files[i].startswith(subpath):
812 del files[i]
826 del files[i]
813 j += 1
827 j += 1
814
828
815 if not files or '.' in files:
829 if not files or '.' in files:
816 files = ['.']
830 files = ['.']
817 results = dict.fromkeys(subrepos)
831 results = dict.fromkeys(subrepos)
818 results['.hg'] = None
832 results['.hg'] = None
819
833
820 alldirs = None
834 alldirs = None
821 for ff in files:
835 for ff in files:
822 # constructing the foldmap is expensive, so don't do it for the
836 # constructing the foldmap is expensive, so don't do it for the
823 # common case where files is ['.']
837 # common case where files is ['.']
824 if normalize and ff != '.':
838 if normalize and ff != '.':
825 nf = normalize(ff, False, True)
839 nf = normalize(ff, False, True)
826 else:
840 else:
827 nf = ff
841 nf = ff
828 if nf in results:
842 if nf in results:
829 continue
843 continue
830
844
831 try:
845 try:
832 st = lstat(join(nf))
846 st = lstat(join(nf))
833 kind = getkind(st.st_mode)
847 kind = getkind(st.st_mode)
834 if kind == dirkind:
848 if kind == dirkind:
835 if nf in dmap:
849 if nf in dmap:
836 # file replaced by dir on disk but still in dirstate
850 # file replaced by dir on disk but still in dirstate
837 results[nf] = None
851 results[nf] = None
838 if matchedir:
852 if matchedir:
839 matchedir(nf)
853 matchedir(nf)
840 foundadd((nf, ff))
854 foundadd((nf, ff))
841 elif kind == regkind or kind == lnkkind:
855 elif kind == regkind or kind == lnkkind:
842 results[nf] = st
856 results[nf] = st
843 else:
857 else:
844 badfn(ff, badtype(kind))
858 badfn(ff, badtype(kind))
845 if nf in dmap:
859 if nf in dmap:
846 results[nf] = None
860 results[nf] = None
847 except OSError as inst: # nf not found on disk - it is dirstate only
861 except OSError as inst: # nf not found on disk - it is dirstate only
848 if nf in dmap: # does it exactly match a missing file?
862 if nf in dmap: # does it exactly match a missing file?
849 results[nf] = None
863 results[nf] = None
850 else: # does it match a missing directory?
864 else: # does it match a missing directory?
851 if alldirs is None:
865 if alldirs is None:
852 alldirs = util.dirs(dmap)
866 alldirs = util.dirs(dmap)
853 if nf in alldirs:
867 if nf in alldirs:
854 if matchedir:
868 if matchedir:
855 matchedir(nf)
869 matchedir(nf)
856 notfoundadd(nf)
870 notfoundadd(nf)
857 else:
871 else:
858 badfn(ff, inst.strerror)
872 badfn(ff, inst.strerror)
859
873
860 # Case insensitive filesystems cannot rely on lstat() failing to detect
874 # Case insensitive filesystems cannot rely on lstat() failing to detect
861 # a case-only rename. Prune the stat object for any file that does not
875 # a case-only rename. Prune the stat object for any file that does not
862 # match the case in the filesystem, if there are multiple files that
876 # match the case in the filesystem, if there are multiple files that
863 # normalize to the same path.
877 # normalize to the same path.
864 if match.isexact() and self._checkcase:
878 if match.isexact() and self._checkcase:
865 normed = {}
879 normed = {}
866
880
867 for f, st in results.iteritems():
881 for f, st in results.iteritems():
868 if st is None:
882 if st is None:
869 continue
883 continue
870
884
871 nc = util.normcase(f)
885 nc = util.normcase(f)
872 paths = normed.get(nc)
886 paths = normed.get(nc)
873
887
874 if paths is None:
888 if paths is None:
875 paths = set()
889 paths = set()
876 normed[nc] = paths
890 normed[nc] = paths
877
891
878 paths.add(f)
892 paths.add(f)
879
893
880 for norm, paths in normed.iteritems():
894 for norm, paths in normed.iteritems():
881 if len(paths) > 1:
895 if len(paths) > 1:
882 for path in paths:
896 for path in paths:
883 folded = self._discoverpath(path, norm, True, None,
897 folded = self._discoverpath(path, norm, True, None,
884 self._dirfoldmap)
898 self._dirfoldmap)
885 if path != folded:
899 if path != folded:
886 results[path] = None
900 results[path] = None
887
901
888 return results, dirsfound, dirsnotfound
902 return results, dirsfound, dirsnotfound
889
903
890 def walk(self, match, subrepos, unknown, ignored, full=True):
904 def walk(self, match, subrepos, unknown, ignored, full=True):
891 '''
905 '''
892 Walk recursively through the directory tree, finding all files
906 Walk recursively through the directory tree, finding all files
893 matched by match.
907 matched by match.
894
908
895 If full is False, maybe skip some known-clean files.
909 If full is False, maybe skip some known-clean files.
896
910
897 Return a dict mapping filename to stat-like object (either
911 Return a dict mapping filename to stat-like object (either
898 mercurial.osutil.stat instance or return value of os.stat()).
912 mercurial.osutil.stat instance or return value of os.stat()).
899
913
900 '''
914 '''
901 # full is a flag that extensions that hook into walk can use -- this
915 # full is a flag that extensions that hook into walk can use -- this
902 # implementation doesn't use it at all. This satisfies the contract
916 # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = osutil.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                nd = work.pop()
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd), inst.strerror)
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = dmap.keys()
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was
                # stat'ed and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                nf = iter(visit).next
                for st in util.statfiles([join(i) for i in visit]):
                    results[nf()] = st
        return results
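As a reading aid (not part of the file): walk() returns a dict mapping normalized file names to stat results, or None when the file is only known from the dirstate. A minimal sketch of driving it directly, assuming a loaded repo object and mercurial.match imported as matchmod, both of which are caller-side assumptions:

    # hypothetical caller-side sketch, not part of dirstate.py
    m = matchmod.always(repo.root, repo.getcwd())
    results = repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
    for fname, st in sorted(results.iteritems()):
        print fname, ('(no stat)' if st is None else st.st_size)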

    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        ladd = lookup.append # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))
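A hedged usage sketch of status() (again assuming caller-side repo and matchmod objects that are not defined in this file): the first element of the returned pair is the "unsure"/lookup list, and the second is a scmutil.status object whose fields match the docstring above.

    # hypothetical caller-side sketch, not part of dirstate.py
    m = matchmod.always(repo.root, repo.getcwd())
    unsure, st = repo.dirstate.status(m, subrepos=[], ignored=False,
                                      clean=False, unknown=True)
    for f in st.modified:
        print 'modified:', f
    for f in unsure:
        print 'needs content comparison:', f

Callers typically resolve the unsure entries by comparing file contents against the parent revision; that logic lives outside this class.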

    def matches(self, match):
        '''
        return files in the dirstate (in whatever state) filtered by match
        '''
        dmap = self._map
        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files is
            # much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just return
            # that
            return list(files)
        return [f for f in dmap if match(f)]
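For completeness, a small sketch of matches() with a hypothetical matcher: unlike walk(), it consults only the dirstate, so entries in any state (including files marked for removal) can show up.

    # hypothetical caller-side sketch, not part of dirstate.py
    m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    tracked = repo.dirstate.matches(m)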

    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def _savebackup(self, tr, suffix):
        '''Save the current dirstate into a backup file with the given suffix'''
        filename = self._actualfilename(tr)

        # use '_writedirstate' instead of 'write' to make sure changes are
        # always written out, because the latter skips writing when a
        # transaction is running. The file written here is then used to
        # create the backup of the dirstate at this point.
        self._writedirstate(self._opener(filename, "w", atomictemp=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if the dirstate is never
            # changed after this point
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that the pending file written above is unlinked on
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        self._opener.write(filename + suffix, self._opener.tryread(filename))

    def _restorebackup(self, tr, suffix):
        '''Restore the dirstate from the backup file with the given suffix'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # dirstate changes out after restoring from the backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        self._opener.rename(filename + suffix, filename)

    def _clearbackup(self, tr, suffix):
        '''Clear (remove) the backup file with the given suffix'''
        filename = self._actualfilename(tr)
        self._opener.unlink(filename + suffix)
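The three backup helpers above are meant to be used together around an operation that may need to roll the dirstate back. A rough sketch of the intended lifecycle, where tr is the current transaction (or None) and the suffix and the risky operation are placeholders:

    # hypothetical caller-side sketch, not part of dirstate.py
    repo.dirstate._savebackup(tr, '.backup')      # snapshot the dirstate
    try:
        perform_risky_update()                    # placeholder operation
        repo.dirstate._clearbackup(tr, '.backup') # success: drop the snapshot
    except Exception:
        repo.dirstate._restorebackup(tr, '.backup')  # failure: roll back
        raise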