##// END OF EJS Templates
dirstate: fix filefoldmap incosistency on file delete...
Mateusz Kwapich -
r26887:663eff02 stable
parent child Browse files
Show More
@@ -1,1167 +1,1172 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid
8 from node import nullid
9 from i18n import _
9 from i18n import _
10 import scmutil, util, osutil, parsers, encoding, pathutil, error
10 import scmutil, util, osutil, parsers, encoding, pathutil, error
11 import os, stat, errno
11 import os, stat, errno
12 import match as matchmod
12 import match as matchmod
13
13
14 propertycache = util.propertycache
14 propertycache = util.propertycache
15 filecache = scmutil.filecache
15 filecache = scmutil.filecache
16 _rangemask = 0x7fffffff
16 _rangemask = 0x7fffffff
17
17
18 dirstatetuple = parsers.dirstatetuple
18 dirstatetuple = parsers.dirstatetuple
19
19
20 class repocache(filecache):
20 class repocache(filecache):
21 """filecache for files in .hg/"""
21 """filecache for files in .hg/"""
22 def join(self, obj, fname):
22 def join(self, obj, fname):
23 return obj._opener.join(fname)
23 return obj._opener.join(fname)
24
24
25 class rootcache(filecache):
25 class rootcache(filecache):
26 """filecache for files in the repository root"""
26 """filecache for files in the repository root"""
27 def join(self, obj, fname):
27 def join(self, obj, fname):
28 return obj._join(fname)
28 return obj._join(fname)
29
29
30 def _getfsnow(vfs):
30 def _getfsnow(vfs):
31 '''Get "now" timestamp on filesystem'''
31 '''Get "now" timestamp on filesystem'''
32 tmpfd, tmpname = vfs.mkstemp()
32 tmpfd, tmpname = vfs.mkstemp()
33 try:
33 try:
34 return util.statmtimesec(os.fstat(tmpfd))
34 return util.statmtimesec(os.fstat(tmpfd))
35 finally:
35 finally:
36 os.close(tmpfd)
36 os.close(tmpfd)
37 vfs.unlink(tmpname)
37 vfs.unlink(tmpname)
38
38
39 def _trypending(root, vfs, filename):
39 def _trypending(root, vfs, filename):
40 '''Open file to be read according to HG_PENDING environment variable
40 '''Open file to be read according to HG_PENDING environment variable
41
41
42 This opens '.pending' of specified 'filename' only when HG_PENDING
42 This opens '.pending' of specified 'filename' only when HG_PENDING
43 is equal to 'root'.
43 is equal to 'root'.
44
44
45 This returns '(fp, is_pending_opened)' tuple.
45 This returns '(fp, is_pending_opened)' tuple.
46 '''
46 '''
47 if root == os.environ.get('HG_PENDING'):
47 if root == os.environ.get('HG_PENDING'):
48 try:
48 try:
49 return (vfs('%s.pending' % filename), True)
49 return (vfs('%s.pending' % filename), True)
50 except IOError as inst:
50 except IOError as inst:
51 if inst.errno != errno.ENOENT:
51 if inst.errno != errno.ENOENT:
52 raise
52 raise
53 return (vfs(filename), False)
53 return (vfs(filename), False)
54
54
55 class dirstate(object):
55 class dirstate(object):
56
56
57 def __init__(self, opener, ui, root, validate):
57 def __init__(self, opener, ui, root, validate):
58 '''Create a new dirstate object.
58 '''Create a new dirstate object.
59
59
60 opener is an open()-like callable that can be used to open the
60 opener is an open()-like callable that can be used to open the
61 dirstate file; root is the root of the directory tracked by
61 dirstate file; root is the root of the directory tracked by
62 the dirstate.
62 the dirstate.
63 '''
63 '''
64 self._opener = opener
64 self._opener = opener
65 self._validate = validate
65 self._validate = validate
66 self._root = root
66 self._root = root
67 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
67 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
68 # UNC path pointing to root share (issue4557)
68 # UNC path pointing to root share (issue4557)
69 self._rootdir = pathutil.normasprefix(root)
69 self._rootdir = pathutil.normasprefix(root)
70 # internal config: ui.forcecwd
70 # internal config: ui.forcecwd
71 forcecwd = ui.config('ui', 'forcecwd')
71 forcecwd = ui.config('ui', 'forcecwd')
72 if forcecwd:
72 if forcecwd:
73 self._cwd = forcecwd
73 self._cwd = forcecwd
74 self._dirty = False
74 self._dirty = False
75 self._dirtypl = False
75 self._dirtypl = False
76 self._lastnormaltime = 0
76 self._lastnormaltime = 0
77 self._ui = ui
77 self._ui = ui
78 self._filecache = {}
78 self._filecache = {}
79 self._parentwriters = 0
79 self._parentwriters = 0
80 self._filename = 'dirstate'
80 self._filename = 'dirstate'
81 self._pendingfilename = '%s.pending' % self._filename
81 self._pendingfilename = '%s.pending' % self._filename
82
82
83 # for consistent view between _pl() and _read() invocations
83 # for consistent view between _pl() and _read() invocations
84 self._pendingmode = None
84 self._pendingmode = None
85
85
86 def beginparentchange(self):
86 def beginparentchange(self):
87 '''Marks the beginning of a set of changes that involve changing
87 '''Marks the beginning of a set of changes that involve changing
88 the dirstate parents. If there is an exception during this time,
88 the dirstate parents. If there is an exception during this time,
89 the dirstate will not be written when the wlock is released. This
89 the dirstate will not be written when the wlock is released. This
90 prevents writing an incoherent dirstate where the parent doesn't
90 prevents writing an incoherent dirstate where the parent doesn't
91 match the contents.
91 match the contents.
92 '''
92 '''
93 self._parentwriters += 1
93 self._parentwriters += 1
94
94
95 def endparentchange(self):
95 def endparentchange(self):
96 '''Marks the end of a set of changes that involve changing the
96 '''Marks the end of a set of changes that involve changing the
97 dirstate parents. Once all parent changes have been marked done,
97 dirstate parents. Once all parent changes have been marked done,
98 the wlock will be free to write the dirstate on release.
98 the wlock will be free to write the dirstate on release.
99 '''
99 '''
100 if self._parentwriters > 0:
100 if self._parentwriters > 0:
101 self._parentwriters -= 1
101 self._parentwriters -= 1
102
102
103 def pendingparentchange(self):
103 def pendingparentchange(self):
104 '''Returns true if the dirstate is in the middle of a set of changes
104 '''Returns true if the dirstate is in the middle of a set of changes
105 that modify the dirstate parent.
105 that modify the dirstate parent.
106 '''
106 '''
107 return self._parentwriters > 0
107 return self._parentwriters > 0
108
108
109 @propertycache
109 @propertycache
110 def _map(self):
110 def _map(self):
111 '''Return the dirstate contents as a map from filename to
111 '''Return the dirstate contents as a map from filename to
112 (state, mode, size, time).'''
112 (state, mode, size, time).'''
113 self._read()
113 self._read()
114 return self._map
114 return self._map
115
115
116 @propertycache
116 @propertycache
117 def _copymap(self):
117 def _copymap(self):
118 self._read()
118 self._read()
119 return self._copymap
119 return self._copymap
120
120
121 @propertycache
121 @propertycache
122 def _filefoldmap(self):
122 def _filefoldmap(self):
123 try:
123 try:
124 makefilefoldmap = parsers.make_file_foldmap
124 makefilefoldmap = parsers.make_file_foldmap
125 except AttributeError:
125 except AttributeError:
126 pass
126 pass
127 else:
127 else:
128 return makefilefoldmap(self._map, util.normcasespec,
128 return makefilefoldmap(self._map, util.normcasespec,
129 util.normcasefallback)
129 util.normcasefallback)
130
130
131 f = {}
131 f = {}
132 normcase = util.normcase
132 normcase = util.normcase
133 for name, s in self._map.iteritems():
133 for name, s in self._map.iteritems():
134 if s[0] != 'r':
134 if s[0] != 'r':
135 f[normcase(name)] = name
135 f[normcase(name)] = name
136 f['.'] = '.' # prevents useless util.fspath() invocation
136 f['.'] = '.' # prevents useless util.fspath() invocation
137 return f
137 return f
138
138
139 @propertycache
139 @propertycache
140 def _dirfoldmap(self):
140 def _dirfoldmap(self):
141 f = {}
141 f = {}
142 normcase = util.normcase
142 normcase = util.normcase
143 for name in self._dirs:
143 for name in self._dirs:
144 f[normcase(name)] = name
144 f[normcase(name)] = name
145 return f
145 return f
146
146
147 @repocache('branch')
147 @repocache('branch')
148 def _branch(self):
148 def _branch(self):
149 try:
149 try:
150 return self._opener.read("branch").strip() or "default"
150 return self._opener.read("branch").strip() or "default"
151 except IOError as inst:
151 except IOError as inst:
152 if inst.errno != errno.ENOENT:
152 if inst.errno != errno.ENOENT:
153 raise
153 raise
154 return "default"
154 return "default"
155
155
156 @propertycache
156 @propertycache
157 def _pl(self):
157 def _pl(self):
158 try:
158 try:
159 fp = self._opendirstatefile()
159 fp = self._opendirstatefile()
160 st = fp.read(40)
160 st = fp.read(40)
161 fp.close()
161 fp.close()
162 l = len(st)
162 l = len(st)
163 if l == 40:
163 if l == 40:
164 return st[:20], st[20:40]
164 return st[:20], st[20:40]
165 elif l > 0 and l < 40:
165 elif l > 0 and l < 40:
166 raise error.Abort(_('working directory state appears damaged!'))
166 raise error.Abort(_('working directory state appears damaged!'))
167 except IOError as err:
167 except IOError as err:
168 if err.errno != errno.ENOENT:
168 if err.errno != errno.ENOENT:
169 raise
169 raise
170 return [nullid, nullid]
170 return [nullid, nullid]
171
171
172 @propertycache
172 @propertycache
173 def _dirs(self):
173 def _dirs(self):
174 return util.dirs(self._map, 'r')
174 return util.dirs(self._map, 'r')
175
175
176 def dirs(self):
176 def dirs(self):
177 return self._dirs
177 return self._dirs
178
178
179 @rootcache('.hgignore')
179 @rootcache('.hgignore')
180 def _ignore(self):
180 def _ignore(self):
181 files = []
181 files = []
182 if os.path.exists(self._join('.hgignore')):
182 if os.path.exists(self._join('.hgignore')):
183 files.append(self._join('.hgignore'))
183 files.append(self._join('.hgignore'))
184 for name, path in self._ui.configitems("ui"):
184 for name, path in self._ui.configitems("ui"):
185 if name == 'ignore' or name.startswith('ignore.'):
185 if name == 'ignore' or name.startswith('ignore.'):
186 # we need to use os.path.join here rather than self._join
186 # we need to use os.path.join here rather than self._join
187 # because path is arbitrary and user-specified
187 # because path is arbitrary and user-specified
188 files.append(os.path.join(self._rootdir, util.expandpath(path)))
188 files.append(os.path.join(self._rootdir, util.expandpath(path)))
189
189
190 if not files:
190 if not files:
191 return util.never
191 return util.never
192
192
193 pats = ['include:%s' % f for f in files]
193 pats = ['include:%s' % f for f in files]
194 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
194 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
195
195
196 @propertycache
196 @propertycache
197 def _slash(self):
197 def _slash(self):
198 return self._ui.configbool('ui', 'slash') and os.sep != '/'
198 return self._ui.configbool('ui', 'slash') and os.sep != '/'
199
199
200 @propertycache
200 @propertycache
201 def _checklink(self):
201 def _checklink(self):
202 return util.checklink(self._root)
202 return util.checklink(self._root)
203
203
204 @propertycache
204 @propertycache
205 def _checkexec(self):
205 def _checkexec(self):
206 return util.checkexec(self._root)
206 return util.checkexec(self._root)
207
207
208 @propertycache
208 @propertycache
209 def _checkcase(self):
209 def _checkcase(self):
210 return not util.checkcase(self._join('.hg'))
210 return not util.checkcase(self._join('.hg'))
211
211
212 def _join(self, f):
212 def _join(self, f):
213 # much faster than os.path.join()
213 # much faster than os.path.join()
214 # it's safe because f is always a relative path
214 # it's safe because f is always a relative path
215 return self._rootdir + f
215 return self._rootdir + f
216
216
217 def flagfunc(self, buildfallback):
217 def flagfunc(self, buildfallback):
218 if self._checklink and self._checkexec:
218 if self._checklink and self._checkexec:
219 def f(x):
219 def f(x):
220 try:
220 try:
221 st = os.lstat(self._join(x))
221 st = os.lstat(self._join(x))
222 if util.statislink(st):
222 if util.statislink(st):
223 return 'l'
223 return 'l'
224 if util.statisexec(st):
224 if util.statisexec(st):
225 return 'x'
225 return 'x'
226 except OSError:
226 except OSError:
227 pass
227 pass
228 return ''
228 return ''
229 return f
229 return f
230
230
231 fallback = buildfallback()
231 fallback = buildfallback()
232 if self._checklink:
232 if self._checklink:
233 def f(x):
233 def f(x):
234 if os.path.islink(self._join(x)):
234 if os.path.islink(self._join(x)):
235 return 'l'
235 return 'l'
236 if 'x' in fallback(x):
236 if 'x' in fallback(x):
237 return 'x'
237 return 'x'
238 return ''
238 return ''
239 return f
239 return f
240 if self._checkexec:
240 if self._checkexec:
241 def f(x):
241 def f(x):
242 if 'l' in fallback(x):
242 if 'l' in fallback(x):
243 return 'l'
243 return 'l'
244 if util.isexec(self._join(x)):
244 if util.isexec(self._join(x)):
245 return 'x'
245 return 'x'
246 return ''
246 return ''
247 return f
247 return f
248 else:
248 else:
249 return fallback
249 return fallback
250
250
251 @propertycache
251 @propertycache
252 def _cwd(self):
252 def _cwd(self):
253 return os.getcwd()
253 return os.getcwd()
254
254
255 def getcwd(self):
255 def getcwd(self):
256 '''Return the path from which a canonical path is calculated.
256 '''Return the path from which a canonical path is calculated.
257
257
258 This path should be used to resolve file patterns or to convert
258 This path should be used to resolve file patterns or to convert
259 canonical paths back to file paths for display. It shouldn't be
259 canonical paths back to file paths for display. It shouldn't be
260 used to get real file paths. Use vfs functions instead.
260 used to get real file paths. Use vfs functions instead.
261 '''
261 '''
262 cwd = self._cwd
262 cwd = self._cwd
263 if cwd == self._root:
263 if cwd == self._root:
264 return ''
264 return ''
265 # self._root ends with a path separator if self._root is '/' or 'C:\'
265 # self._root ends with a path separator if self._root is '/' or 'C:\'
266 rootsep = self._root
266 rootsep = self._root
267 if not util.endswithsep(rootsep):
267 if not util.endswithsep(rootsep):
268 rootsep += os.sep
268 rootsep += os.sep
269 if cwd.startswith(rootsep):
269 if cwd.startswith(rootsep):
270 return cwd[len(rootsep):]
270 return cwd[len(rootsep):]
271 else:
271 else:
272 # we're outside the repo. return an absolute path.
272 # we're outside the repo. return an absolute path.
273 return cwd
273 return cwd
274
274
275 def pathto(self, f, cwd=None):
275 def pathto(self, f, cwd=None):
276 if cwd is None:
276 if cwd is None:
277 cwd = self.getcwd()
277 cwd = self.getcwd()
278 path = util.pathto(self._root, cwd, f)
278 path = util.pathto(self._root, cwd, f)
279 if self._slash:
279 if self._slash:
280 return util.pconvert(path)
280 return util.pconvert(path)
281 return path
281 return path
282
282
283 def __getitem__(self, key):
283 def __getitem__(self, key):
284 '''Return the current state of key (a filename) in the dirstate.
284 '''Return the current state of key (a filename) in the dirstate.
285
285
286 States are:
286 States are:
287 n normal
287 n normal
288 m needs merging
288 m needs merging
289 r marked for removal
289 r marked for removal
290 a marked for addition
290 a marked for addition
291 ? not tracked
291 ? not tracked
292 '''
292 '''
293 return self._map.get(key, ("?",))[0]
293 return self._map.get(key, ("?",))[0]
294
294
295 def __contains__(self, key):
295 def __contains__(self, key):
296 return key in self._map
296 return key in self._map
297
297
298 def __iter__(self):
298 def __iter__(self):
299 for x in sorted(self._map):
299 for x in sorted(self._map):
300 yield x
300 yield x
301
301
302 def iteritems(self):
302 def iteritems(self):
303 return self._map.iteritems()
303 return self._map.iteritems()
304
304
305 def parents(self):
305 def parents(self):
306 return [self._validate(p) for p in self._pl]
306 return [self._validate(p) for p in self._pl]
307
307
308 def p1(self):
308 def p1(self):
309 return self._validate(self._pl[0])
309 return self._validate(self._pl[0])
310
310
311 def p2(self):
311 def p2(self):
312 return self._validate(self._pl[1])
312 return self._validate(self._pl[1])
313
313
314 def branch(self):
314 def branch(self):
315 return encoding.tolocal(self._branch)
315 return encoding.tolocal(self._branch)
316
316
317 def setparents(self, p1, p2=nullid):
317 def setparents(self, p1, p2=nullid):
318 """Set dirstate parents to p1 and p2.
318 """Set dirstate parents to p1 and p2.
319
319
320 When moving from two parents to one, 'm' merged entries a
320 When moving from two parents to one, 'm' merged entries a
321 adjusted to normal and previous copy records discarded and
321 adjusted to normal and previous copy records discarded and
322 returned by the call.
322 returned by the call.
323
323
324 See localrepo.setparents()
324 See localrepo.setparents()
325 """
325 """
326 if self._parentwriters == 0:
326 if self._parentwriters == 0:
327 raise ValueError("cannot set dirstate parent without "
327 raise ValueError("cannot set dirstate parent without "
328 "calling dirstate.beginparentchange")
328 "calling dirstate.beginparentchange")
329
329
330 self._dirty = self._dirtypl = True
330 self._dirty = self._dirtypl = True
331 oldp2 = self._pl[1]
331 oldp2 = self._pl[1]
332 self._pl = p1, p2
332 self._pl = p1, p2
333 copies = {}
333 copies = {}
334 if oldp2 != nullid and p2 == nullid:
334 if oldp2 != nullid and p2 == nullid:
335 for f, s in self._map.iteritems():
335 for f, s in self._map.iteritems():
336 # Discard 'm' markers when moving away from a merge state
336 # Discard 'm' markers when moving away from a merge state
337 if s[0] == 'm':
337 if s[0] == 'm':
338 if f in self._copymap:
338 if f in self._copymap:
339 copies[f] = self._copymap[f]
339 copies[f] = self._copymap[f]
340 self.normallookup(f)
340 self.normallookup(f)
341 # Also fix up otherparent markers
341 # Also fix up otherparent markers
342 elif s[0] == 'n' and s[2] == -2:
342 elif s[0] == 'n' and s[2] == -2:
343 if f in self._copymap:
343 if f in self._copymap:
344 copies[f] = self._copymap[f]
344 copies[f] = self._copymap[f]
345 self.add(f)
345 self.add(f)
346 return copies
346 return copies
347
347
348 def setbranch(self, branch):
348 def setbranch(self, branch):
349 self._branch = encoding.fromlocal(branch)
349 self._branch = encoding.fromlocal(branch)
350 f = self._opener('branch', 'w', atomictemp=True)
350 f = self._opener('branch', 'w', atomictemp=True)
351 try:
351 try:
352 f.write(self._branch + '\n')
352 f.write(self._branch + '\n')
353 f.close()
353 f.close()
354
354
355 # make sure filecache has the correct stat info for _branch after
355 # make sure filecache has the correct stat info for _branch after
356 # replacing the underlying file
356 # replacing the underlying file
357 ce = self._filecache['_branch']
357 ce = self._filecache['_branch']
358 if ce:
358 if ce:
359 ce.refresh()
359 ce.refresh()
360 except: # re-raises
360 except: # re-raises
361 f.discard()
361 f.discard()
362 raise
362 raise
363
363
364 def _opendirstatefile(self):
364 def _opendirstatefile(self):
365 fp, mode = _trypending(self._root, self._opener, self._filename)
365 fp, mode = _trypending(self._root, self._opener, self._filename)
366 if self._pendingmode is not None and self._pendingmode != mode:
366 if self._pendingmode is not None and self._pendingmode != mode:
367 fp.close()
367 fp.close()
368 raise error.Abort(_('working directory state may be '
368 raise error.Abort(_('working directory state may be '
369 'changed parallelly'))
369 'changed parallelly'))
370 self._pendingmode = mode
370 self._pendingmode = mode
371 return fp
371 return fp
372
372
373 def _read(self):
373 def _read(self):
374 self._map = {}
374 self._map = {}
375 self._copymap = {}
375 self._copymap = {}
376 try:
376 try:
377 fp = self._opendirstatefile()
377 fp = self._opendirstatefile()
378 try:
378 try:
379 st = fp.read()
379 st = fp.read()
380 finally:
380 finally:
381 fp.close()
381 fp.close()
382 except IOError as err:
382 except IOError as err:
383 if err.errno != errno.ENOENT:
383 if err.errno != errno.ENOENT:
384 raise
384 raise
385 return
385 return
386 if not st:
386 if not st:
387 return
387 return
388
388
389 if util.safehasattr(parsers, 'dict_new_presized'):
389 if util.safehasattr(parsers, 'dict_new_presized'):
390 # Make an estimate of the number of files in the dirstate based on
390 # Make an estimate of the number of files in the dirstate based on
391 # its size. From a linear regression on a set of real-world repos,
391 # its size. From a linear regression on a set of real-world repos,
392 # all over 10,000 files, the size of a dirstate entry is 85
392 # all over 10,000 files, the size of a dirstate entry is 85
393 # bytes. The cost of resizing is significantly higher than the cost
393 # bytes. The cost of resizing is significantly higher than the cost
394 # of filling in a larger presized dict, so subtract 20% from the
394 # of filling in a larger presized dict, so subtract 20% from the
395 # size.
395 # size.
396 #
396 #
397 # This heuristic is imperfect in many ways, so in a future dirstate
397 # This heuristic is imperfect in many ways, so in a future dirstate
398 # format update it makes sense to just record the number of entries
398 # format update it makes sense to just record the number of entries
399 # on write.
399 # on write.
400 self._map = parsers.dict_new_presized(len(st) / 71)
400 self._map = parsers.dict_new_presized(len(st) / 71)
401
401
402 # Python's garbage collector triggers a GC each time a certain number
402 # Python's garbage collector triggers a GC each time a certain number
403 # of container objects (the number being defined by
403 # of container objects (the number being defined by
404 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
404 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
405 # for each file in the dirstate. The C version then immediately marks
405 # for each file in the dirstate. The C version then immediately marks
406 # them as not to be tracked by the collector. However, this has no
406 # them as not to be tracked by the collector. However, this has no
407 # effect on when GCs are triggered, only on what objects the GC looks
407 # effect on when GCs are triggered, only on what objects the GC looks
408 # into. This means that O(number of files) GCs are unavoidable.
408 # into. This means that O(number of files) GCs are unavoidable.
409 # Depending on when in the process's lifetime the dirstate is parsed,
409 # Depending on when in the process's lifetime the dirstate is parsed,
410 # this can get very expensive. As a workaround, disable GC while
410 # this can get very expensive. As a workaround, disable GC while
411 # parsing the dirstate.
411 # parsing the dirstate.
412 #
412 #
413 # (we cannot decorate the function directly since it is in a C module)
413 # (we cannot decorate the function directly since it is in a C module)
414 parse_dirstate = util.nogc(parsers.parse_dirstate)
414 parse_dirstate = util.nogc(parsers.parse_dirstate)
415 p = parse_dirstate(self._map, self._copymap, st)
415 p = parse_dirstate(self._map, self._copymap, st)
416 if not self._dirtypl:
416 if not self._dirtypl:
417 self._pl = p
417 self._pl = p
418
418
419 def invalidate(self):
419 def invalidate(self):
420 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
420 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
421 "_pl", "_dirs", "_ignore"):
421 "_pl", "_dirs", "_ignore"):
422 if a in self.__dict__:
422 if a in self.__dict__:
423 delattr(self, a)
423 delattr(self, a)
424 self._lastnormaltime = 0
424 self._lastnormaltime = 0
425 self._dirty = False
425 self._dirty = False
426 self._parentwriters = 0
426 self._parentwriters = 0
427
427
428 def copy(self, source, dest):
428 def copy(self, source, dest):
429 """Mark dest as a copy of source. Unmark dest if source is None."""
429 """Mark dest as a copy of source. Unmark dest if source is None."""
430 if source == dest:
430 if source == dest:
431 return
431 return
432 self._dirty = True
432 self._dirty = True
433 if source is not None:
433 if source is not None:
434 self._copymap[dest] = source
434 self._copymap[dest] = source
435 elif dest in self._copymap:
435 elif dest in self._copymap:
436 del self._copymap[dest]
436 del self._copymap[dest]
437
437
438 def copied(self, file):
438 def copied(self, file):
439 return self._copymap.get(file, None)
439 return self._copymap.get(file, None)
440
440
441 def copies(self):
441 def copies(self):
442 return self._copymap
442 return self._copymap
443
443
444 def _droppath(self, f):
444 def _droppath(self, f):
445 if self[f] not in "?r" and "_dirs" in self.__dict__:
445 if self[f] not in "?r" and "_dirs" in self.__dict__:
446 self._dirs.delpath(f)
446 self._dirs.delpath(f)
447
447
448 if "_filefoldmap" in self.__dict__:
449 normed = util.normcase(f)
450 if normed in self._filefoldmap:
451 del self._filefoldmap[normed]
452
448 def _addpath(self, f, state, mode, size, mtime):
453 def _addpath(self, f, state, mode, size, mtime):
449 oldstate = self[f]
454 oldstate = self[f]
450 if state == 'a' or oldstate == 'r':
455 if state == 'a' or oldstate == 'r':
451 scmutil.checkfilename(f)
456 scmutil.checkfilename(f)
452 if f in self._dirs:
457 if f in self._dirs:
453 raise error.Abort(_('directory %r already in dirstate') % f)
458 raise error.Abort(_('directory %r already in dirstate') % f)
454 # shadows
459 # shadows
455 for d in util.finddirs(f):
460 for d in util.finddirs(f):
456 if d in self._dirs:
461 if d in self._dirs:
457 break
462 break
458 if d in self._map and self[d] != 'r':
463 if d in self._map and self[d] != 'r':
459 raise error.Abort(
464 raise error.Abort(
460 _('file %r in dirstate clashes with %r') % (d, f))
465 _('file %r in dirstate clashes with %r') % (d, f))
461 if oldstate in "?r" and "_dirs" in self.__dict__:
466 if oldstate in "?r" and "_dirs" in self.__dict__:
462 self._dirs.addpath(f)
467 self._dirs.addpath(f)
463 self._dirty = True
468 self._dirty = True
464 self._map[f] = dirstatetuple(state, mode, size, mtime)
469 self._map[f] = dirstatetuple(state, mode, size, mtime)
465
470
466 def normal(self, f):
471 def normal(self, f):
467 '''Mark a file normal and clean.'''
472 '''Mark a file normal and clean.'''
468 s = os.lstat(self._join(f))
473 s = os.lstat(self._join(f))
469 mtime = util.statmtimesec(s)
474 mtime = util.statmtimesec(s)
470 self._addpath(f, 'n', s.st_mode,
475 self._addpath(f, 'n', s.st_mode,
471 s.st_size & _rangemask, mtime & _rangemask)
476 s.st_size & _rangemask, mtime & _rangemask)
472 if f in self._copymap:
477 if f in self._copymap:
473 del self._copymap[f]
478 del self._copymap[f]
474 if mtime > self._lastnormaltime:
479 if mtime > self._lastnormaltime:
475 # Remember the most recent modification timeslot for status(),
480 # Remember the most recent modification timeslot for status(),
476 # to make sure we won't miss future size-preserving file content
481 # to make sure we won't miss future size-preserving file content
477 # modifications that happen within the same timeslot.
482 # modifications that happen within the same timeslot.
478 self._lastnormaltime = mtime
483 self._lastnormaltime = mtime
479
484
480 def normallookup(self, f):
485 def normallookup(self, f):
481 '''Mark a file normal, but possibly dirty.'''
486 '''Mark a file normal, but possibly dirty.'''
482 if self._pl[1] != nullid and f in self._map:
487 if self._pl[1] != nullid and f in self._map:
483 # if there is a merge going on and the file was either
488 # if there is a merge going on and the file was either
484 # in state 'm' (-1) or coming from other parent (-2) before
489 # in state 'm' (-1) or coming from other parent (-2) before
485 # being removed, restore that state.
490 # being removed, restore that state.
486 entry = self._map[f]
491 entry = self._map[f]
487 if entry[0] == 'r' and entry[2] in (-1, -2):
492 if entry[0] == 'r' and entry[2] in (-1, -2):
488 source = self._copymap.get(f)
493 source = self._copymap.get(f)
489 if entry[2] == -1:
494 if entry[2] == -1:
490 self.merge(f)
495 self.merge(f)
491 elif entry[2] == -2:
496 elif entry[2] == -2:
492 self.otherparent(f)
497 self.otherparent(f)
493 if source:
498 if source:
494 self.copy(source, f)
499 self.copy(source, f)
495 return
500 return
496 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
501 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
497 return
502 return
498 self._addpath(f, 'n', 0, -1, -1)
503 self._addpath(f, 'n', 0, -1, -1)
499 if f in self._copymap:
504 if f in self._copymap:
500 del self._copymap[f]
505 del self._copymap[f]
501
506
502 def otherparent(self, f):
507 def otherparent(self, f):
503 '''Mark as coming from the other parent, always dirty.'''
508 '''Mark as coming from the other parent, always dirty.'''
504 if self._pl[1] == nullid:
509 if self._pl[1] == nullid:
505 raise error.Abort(_("setting %r to other parent "
510 raise error.Abort(_("setting %r to other parent "
506 "only allowed in merges") % f)
511 "only allowed in merges") % f)
507 if f in self and self[f] == 'n':
512 if f in self and self[f] == 'n':
508 # merge-like
513 # merge-like
509 self._addpath(f, 'm', 0, -2, -1)
514 self._addpath(f, 'm', 0, -2, -1)
510 else:
515 else:
511 # add-like
516 # add-like
512 self._addpath(f, 'n', 0, -2, -1)
517 self._addpath(f, 'n', 0, -2, -1)
513
518
514 if f in self._copymap:
519 if f in self._copymap:
515 del self._copymap[f]
520 del self._copymap[f]
516
521
517 def add(self, f):
522 def add(self, f):
518 '''Mark a file added.'''
523 '''Mark a file added.'''
519 self._addpath(f, 'a', 0, -1, -1)
524 self._addpath(f, 'a', 0, -1, -1)
520 if f in self._copymap:
525 if f in self._copymap:
521 del self._copymap[f]
526 del self._copymap[f]
522
527
523 def remove(self, f):
528 def remove(self, f):
524 '''Mark a file removed.'''
529 '''Mark a file removed.'''
525 self._dirty = True
530 self._dirty = True
526 self._droppath(f)
531 self._droppath(f)
527 size = 0
532 size = 0
528 if self._pl[1] != nullid and f in self._map:
533 if self._pl[1] != nullid and f in self._map:
529 # backup the previous state
534 # backup the previous state
530 entry = self._map[f]
535 entry = self._map[f]
531 if entry[0] == 'm': # merge
536 if entry[0] == 'm': # merge
532 size = -1
537 size = -1
533 elif entry[0] == 'n' and entry[2] == -2: # other parent
538 elif entry[0] == 'n' and entry[2] == -2: # other parent
534 size = -2
539 size = -2
535 self._map[f] = dirstatetuple('r', 0, size, 0)
540 self._map[f] = dirstatetuple('r', 0, size, 0)
536 if size == 0 and f in self._copymap:
541 if size == 0 and f in self._copymap:
537 del self._copymap[f]
542 del self._copymap[f]
538
543
539 def merge(self, f):
544 def merge(self, f):
540 '''Mark a file merged.'''
545 '''Mark a file merged.'''
541 if self._pl[1] == nullid:
546 if self._pl[1] == nullid:
542 return self.normallookup(f)
547 return self.normallookup(f)
543 return self.otherparent(f)
548 return self.otherparent(f)
544
549
545 def drop(self, f):
550 def drop(self, f):
546 '''Drop a file from the dirstate'''
551 '''Drop a file from the dirstate'''
547 if f in self._map:
552 if f in self._map:
548 self._dirty = True
553 self._dirty = True
549 self._droppath(f)
554 self._droppath(f)
550 del self._map[f]
555 del self._map[f]
551
556
552 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
557 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
553 if exists is None:
558 if exists is None:
554 exists = os.path.lexists(os.path.join(self._root, path))
559 exists = os.path.lexists(os.path.join(self._root, path))
555 if not exists:
560 if not exists:
556 # Maybe a path component exists
561 # Maybe a path component exists
557 if not ignoremissing and '/' in path:
562 if not ignoremissing and '/' in path:
558 d, f = path.rsplit('/', 1)
563 d, f = path.rsplit('/', 1)
559 d = self._normalize(d, False, ignoremissing, None)
564 d = self._normalize(d, False, ignoremissing, None)
560 folded = d + "/" + f
565 folded = d + "/" + f
561 else:
566 else:
562 # No path components, preserve original case
567 # No path components, preserve original case
563 folded = path
568 folded = path
564 else:
569 else:
565 # recursively normalize leading directory components
570 # recursively normalize leading directory components
566 # against dirstate
571 # against dirstate
567 if '/' in normed:
572 if '/' in normed:
568 d, f = normed.rsplit('/', 1)
573 d, f = normed.rsplit('/', 1)
569 d = self._normalize(d, False, ignoremissing, True)
574 d = self._normalize(d, False, ignoremissing, True)
570 r = self._root + "/" + d
575 r = self._root + "/" + d
571 folded = d + "/" + util.fspath(f, r)
576 folded = d + "/" + util.fspath(f, r)
572 else:
577 else:
573 folded = util.fspath(normed, self._root)
578 folded = util.fspath(normed, self._root)
574 storemap[normed] = folded
579 storemap[normed] = folded
575
580
576 return folded
581 return folded
577
582
578 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
583 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
579 normed = util.normcase(path)
584 normed = util.normcase(path)
580 folded = self._filefoldmap.get(normed, None)
585 folded = self._filefoldmap.get(normed, None)
581 if folded is None:
586 if folded is None:
582 if isknown:
587 if isknown:
583 folded = path
588 folded = path
584 else:
589 else:
585 folded = self._discoverpath(path, normed, ignoremissing, exists,
590 folded = self._discoverpath(path, normed, ignoremissing, exists,
586 self._filefoldmap)
591 self._filefoldmap)
587 return folded
592 return folded
588
593
589 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
594 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
590 normed = util.normcase(path)
595 normed = util.normcase(path)
591 folded = self._filefoldmap.get(normed, None)
596 folded = self._filefoldmap.get(normed, None)
592 if folded is None:
597 if folded is None:
593 folded = self._dirfoldmap.get(normed, None)
598 folded = self._dirfoldmap.get(normed, None)
594 if folded is None:
599 if folded is None:
595 if isknown:
600 if isknown:
596 folded = path
601 folded = path
597 else:
602 else:
598 # store discovered result in dirfoldmap so that future
603 # store discovered result in dirfoldmap so that future
599 # normalizefile calls don't start matching directories
604 # normalizefile calls don't start matching directories
600 folded = self._discoverpath(path, normed, ignoremissing, exists,
605 folded = self._discoverpath(path, normed, ignoremissing, exists,
601 self._dirfoldmap)
606 self._dirfoldmap)
602 return folded
607 return folded
603
608
604 def normalize(self, path, isknown=False, ignoremissing=False):
609 def normalize(self, path, isknown=False, ignoremissing=False):
605 '''
610 '''
606 normalize the case of a pathname when on a casefolding filesystem
611 normalize the case of a pathname when on a casefolding filesystem
607
612
608 isknown specifies whether the filename came from walking the
613 isknown specifies whether the filename came from walking the
609 disk, to avoid extra filesystem access.
614 disk, to avoid extra filesystem access.
610
615
611 If ignoremissing is True, missing path are returned
616 If ignoremissing is True, missing path are returned
612 unchanged. Otherwise, we try harder to normalize possibly
617 unchanged. Otherwise, we try harder to normalize possibly
613 existing path components.
618 existing path components.
614
619
615 The normalized case is determined based on the following precedence:
620 The normalized case is determined based on the following precedence:
616
621
617 - version of name already stored in the dirstate
622 - version of name already stored in the dirstate
618 - version of name stored on disk
623 - version of name stored on disk
619 - version provided via command arguments
624 - version provided via command arguments
620 '''
625 '''
621
626
622 if self._checkcase:
627 if self._checkcase:
623 return self._normalize(path, isknown, ignoremissing)
628 return self._normalize(path, isknown, ignoremissing)
624 return path
629 return path
625
630
626 def clear(self):
631 def clear(self):
627 self._map = {}
632 self._map = {}
628 if "_dirs" in self.__dict__:
633 if "_dirs" in self.__dict__:
629 delattr(self, "_dirs")
634 delattr(self, "_dirs")
630 self._copymap = {}
635 self._copymap = {}
631 self._pl = [nullid, nullid]
636 self._pl = [nullid, nullid]
632 self._lastnormaltime = 0
637 self._lastnormaltime = 0
633 self._dirty = True
638 self._dirty = True
634
639
635 def rebuild(self, parent, allfiles, changedfiles=None):
640 def rebuild(self, parent, allfiles, changedfiles=None):
636 if changedfiles is None:
641 if changedfiles is None:
637 changedfiles = allfiles
642 changedfiles = allfiles
638 oldmap = self._map
643 oldmap = self._map
639 self.clear()
644 self.clear()
640 for f in allfiles:
645 for f in allfiles:
641 if f not in changedfiles:
646 if f not in changedfiles:
642 self._map[f] = oldmap[f]
647 self._map[f] = oldmap[f]
643 else:
648 else:
644 if 'x' in allfiles.flags(f):
649 if 'x' in allfiles.flags(f):
645 self._map[f] = dirstatetuple('n', 0o777, -1, 0)
650 self._map[f] = dirstatetuple('n', 0o777, -1, 0)
646 else:
651 else:
647 self._map[f] = dirstatetuple('n', 0o666, -1, 0)
652 self._map[f] = dirstatetuple('n', 0o666, -1, 0)
648 self._pl = (parent, nullid)
653 self._pl = (parent, nullid)
649 self._dirty = True
654 self._dirty = True
650
655
651 def write(self, tr=False):
656 def write(self, tr=False):
652 if not self._dirty:
657 if not self._dirty:
653 return
658 return
654
659
655 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
660 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
656 # timestamp of each entries in dirstate, because of 'now > mtime'
661 # timestamp of each entries in dirstate, because of 'now > mtime'
657 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
662 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
658 if delaywrite > 0:
663 if delaywrite > 0:
659 import time # to avoid useless import
664 import time # to avoid useless import
660 time.sleep(delaywrite)
665 time.sleep(delaywrite)
661
666
662 filename = self._filename
667 filename = self._filename
663 if tr is False: # not explicitly specified
668 if tr is False: # not explicitly specified
664 if (self._ui.configbool('devel', 'all-warnings')
669 if (self._ui.configbool('devel', 'all-warnings')
665 or self._ui.configbool('devel', 'check-dirstate-write')):
670 or self._ui.configbool('devel', 'check-dirstate-write')):
666 self._ui.develwarn('use dirstate.write with '
671 self._ui.develwarn('use dirstate.write with '
667 'repo.currenttransaction()')
672 'repo.currenttransaction()')
668
673
669 if self._opener.lexists(self._pendingfilename):
674 if self._opener.lexists(self._pendingfilename):
670 # if pending file already exists, in-memory changes
675 # if pending file already exists, in-memory changes
671 # should be written into it, because it has priority
676 # should be written into it, because it has priority
672 # to '.hg/dirstate' at reading under HG_PENDING mode
677 # to '.hg/dirstate' at reading under HG_PENDING mode
673 filename = self._pendingfilename
678 filename = self._pendingfilename
674 elif tr:
679 elif tr:
675 # 'dirstate.write()' is not only for writing in-memory
680 # 'dirstate.write()' is not only for writing in-memory
676 # changes out, but also for dropping ambiguous timestamp.
681 # changes out, but also for dropping ambiguous timestamp.
677 # delayed writing re-raise "ambiguous timestamp issue".
682 # delayed writing re-raise "ambiguous timestamp issue".
678 # See also the wiki page below for detail:
683 # See also the wiki page below for detail:
679 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
684 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
680
685
681 # emulate dropping timestamp in 'parsers.pack_dirstate'
686 # emulate dropping timestamp in 'parsers.pack_dirstate'
682 now = _getfsnow(self._opener)
687 now = _getfsnow(self._opener)
683 dmap = self._map
688 dmap = self._map
684 for f, e in dmap.iteritems():
689 for f, e in dmap.iteritems():
685 if e[0] == 'n' and e[3] == now:
690 if e[0] == 'n' and e[3] == now:
686 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
691 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
687
692
688 # emulate that all 'dirstate.normal' results are written out
693 # emulate that all 'dirstate.normal' results are written out
689 self._lastnormaltime = 0
694 self._lastnormaltime = 0
690
695
691 # delay writing in-memory changes out
696 # delay writing in-memory changes out
692 tr.addfilegenerator('dirstate', (self._filename,),
697 tr.addfilegenerator('dirstate', (self._filename,),
693 self._writedirstate, location='plain')
698 self._writedirstate, location='plain')
694 return
699 return
695
700
696 st = self._opener(filename, "w", atomictemp=True)
701 st = self._opener(filename, "w", atomictemp=True)
697 self._writedirstate(st)
702 self._writedirstate(st)
698
703
699 def _writedirstate(self, st):
704 def _writedirstate(self, st):
700 # use the modification time of the newly created temporary file as the
705 # use the modification time of the newly created temporary file as the
701 # filesystem's notion of 'now'
706 # filesystem's notion of 'now'
702 now = util.statmtimesec(util.fstat(st)) & _rangemask
707 now = util.statmtimesec(util.fstat(st)) & _rangemask
703 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
708 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
704 st.close()
709 st.close()
705 self._lastnormaltime = 0
710 self._lastnormaltime = 0
706 self._dirty = self._dirtypl = False
711 self._dirty = self._dirtypl = False
707
712
708 def _dirignore(self, f):
713 def _dirignore(self, f):
709 if f == '.':
714 if f == '.':
710 return False
715 return False
711 if self._ignore(f):
716 if self._ignore(f):
712 return True
717 return True
713 for p in util.finddirs(f):
718 for p in util.finddirs(f):
714 if self._ignore(p):
719 if self._ignore(p):
715 return True
720 return True
716 return False
721 return False
717
722
718 def _walkexplicit(self, match, subrepos):
723 def _walkexplicit(self, match, subrepos):
719 '''Get stat data about the files explicitly specified by match.
724 '''Get stat data about the files explicitly specified by match.
720
725
721 Return a triple (results, dirsfound, dirsnotfound).
726 Return a triple (results, dirsfound, dirsnotfound).
722 - results is a mapping from filename to stat result. It also contains
727 - results is a mapping from filename to stat result. It also contains
723 listings mapping subrepos and .hg to None.
728 listings mapping subrepos and .hg to None.
724 - dirsfound is a list of files found to be directories.
729 - dirsfound is a list of files found to be directories.
725 - dirsnotfound is a list of files that the dirstate thinks are
730 - dirsnotfound is a list of files that the dirstate thinks are
726 directories and that were not found.'''
731 directories and that were not found.'''
727
732
728 def badtype(mode):
733 def badtype(mode):
729 kind = _('unknown')
734 kind = _('unknown')
730 if stat.S_ISCHR(mode):
735 if stat.S_ISCHR(mode):
731 kind = _('character device')
736 kind = _('character device')
732 elif stat.S_ISBLK(mode):
737 elif stat.S_ISBLK(mode):
733 kind = _('block device')
738 kind = _('block device')
734 elif stat.S_ISFIFO(mode):
739 elif stat.S_ISFIFO(mode):
735 kind = _('fifo')
740 kind = _('fifo')
736 elif stat.S_ISSOCK(mode):
741 elif stat.S_ISSOCK(mode):
737 kind = _('socket')
742 kind = _('socket')
738 elif stat.S_ISDIR(mode):
743 elif stat.S_ISDIR(mode):
739 kind = _('directory')
744 kind = _('directory')
740 return _('unsupported file type (type is %s)') % kind
745 return _('unsupported file type (type is %s)') % kind
741
746
742 matchedir = match.explicitdir
747 matchedir = match.explicitdir
743 badfn = match.bad
748 badfn = match.bad
744 dmap = self._map
749 dmap = self._map
745 lstat = os.lstat
750 lstat = os.lstat
746 getkind = stat.S_IFMT
751 getkind = stat.S_IFMT
747 dirkind = stat.S_IFDIR
752 dirkind = stat.S_IFDIR
748 regkind = stat.S_IFREG
753 regkind = stat.S_IFREG
749 lnkkind = stat.S_IFLNK
754 lnkkind = stat.S_IFLNK
750 join = self._join
755 join = self._join
751 dirsfound = []
756 dirsfound = []
752 foundadd = dirsfound.append
757 foundadd = dirsfound.append
753 dirsnotfound = []
758 dirsnotfound = []
754 notfoundadd = dirsnotfound.append
759 notfoundadd = dirsnotfound.append
755
760
756 if not match.isexact() and self._checkcase:
761 if not match.isexact() and self._checkcase:
757 normalize = self._normalize
762 normalize = self._normalize
758 else:
763 else:
759 normalize = None
764 normalize = None
760
765
761 files = sorted(match.files())
766 files = sorted(match.files())
762 subrepos.sort()
767 subrepos.sort()
763 i, j = 0, 0
768 i, j = 0, 0
764 while i < len(files) and j < len(subrepos):
769 while i < len(files) and j < len(subrepos):
765 subpath = subrepos[j] + "/"
770 subpath = subrepos[j] + "/"
766 if files[i] < subpath:
771 if files[i] < subpath:
767 i += 1
772 i += 1
768 continue
773 continue
769 while i < len(files) and files[i].startswith(subpath):
774 while i < len(files) and files[i].startswith(subpath):
770 del files[i]
775 del files[i]
771 j += 1
776 j += 1
772
777
773 if not files or '.' in files:
778 if not files or '.' in files:
774 files = ['.']
779 files = ['.']
775 results = dict.fromkeys(subrepos)
780 results = dict.fromkeys(subrepos)
776 results['.hg'] = None
781 results['.hg'] = None
777
782
778 alldirs = None
783 alldirs = None
779 for ff in files:
784 for ff in files:
780 # constructing the foldmap is expensive, so don't do it for the
785 # constructing the foldmap is expensive, so don't do it for the
781 # common case where files is ['.']
786 # common case where files is ['.']
782 if normalize and ff != '.':
787 if normalize and ff != '.':
783 nf = normalize(ff, False, True)
788 nf = normalize(ff, False, True)
784 else:
789 else:
785 nf = ff
790 nf = ff
786 if nf in results:
791 if nf in results:
787 continue
792 continue
788
793
789 try:
794 try:
790 st = lstat(join(nf))
795 st = lstat(join(nf))
791 kind = getkind(st.st_mode)
796 kind = getkind(st.st_mode)
792 if kind == dirkind:
797 if kind == dirkind:
793 if nf in dmap:
798 if nf in dmap:
794 # file replaced by dir on disk but still in dirstate
799 # file replaced by dir on disk but still in dirstate
795 results[nf] = None
800 results[nf] = None
796 if matchedir:
801 if matchedir:
797 matchedir(nf)
802 matchedir(nf)
798 foundadd((nf, ff))
803 foundadd((nf, ff))
799 elif kind == regkind or kind == lnkkind:
804 elif kind == regkind or kind == lnkkind:
800 results[nf] = st
805 results[nf] = st
801 else:
806 else:
802 badfn(ff, badtype(kind))
807 badfn(ff, badtype(kind))
803 if nf in dmap:
808 if nf in dmap:
804 results[nf] = None
809 results[nf] = None
805 except OSError as inst: # nf not found on disk - it is dirstate only
810 except OSError as inst: # nf not found on disk - it is dirstate only
806 if nf in dmap: # does it exactly match a missing file?
811 if nf in dmap: # does it exactly match a missing file?
807 results[nf] = None
812 results[nf] = None
808 else: # does it match a missing directory?
813 else: # does it match a missing directory?
809 if alldirs is None:
814 if alldirs is None:
810 alldirs = util.dirs(dmap)
815 alldirs = util.dirs(dmap)
811 if nf in alldirs:
816 if nf in alldirs:
812 if matchedir:
817 if matchedir:
813 matchedir(nf)
818 matchedir(nf)
814 notfoundadd(nf)
819 notfoundadd(nf)
815 else:
820 else:
816 badfn(ff, inst.strerror)
821 badfn(ff, inst.strerror)
817
822
818 # Case insensitive filesystems cannot rely on lstat() failing to detect
823 # Case insensitive filesystems cannot rely on lstat() failing to detect
819 # a case-only rename. Prune the stat object for any file that does not
824 # a case-only rename. Prune the stat object for any file that does not
820 # match the case in the filesystem, if there are multiple files that
825 # match the case in the filesystem, if there are multiple files that
821 # normalize to the same path.
826 # normalize to the same path.
822 if match.isexact() and self._checkcase:
827 if match.isexact() and self._checkcase:
823 normed = {}
828 normed = {}
824
829
825 for f, st in results.iteritems():
830 for f, st in results.iteritems():
826 if st is None:
831 if st is None:
827 continue
832 continue
828
833
829 nc = util.normcase(f)
834 nc = util.normcase(f)
830 paths = normed.get(nc)
835 paths = normed.get(nc)
831
836
832 if paths is None:
837 if paths is None:
833 paths = set()
838 paths = set()
834 normed[nc] = paths
839 normed[nc] = paths
835
840
836 paths.add(f)
841 paths.add(f)
837
842
838 for norm, paths in normed.iteritems():
843 for norm, paths in normed.iteritems():
839 if len(paths) > 1:
844 if len(paths) > 1:
840 for path in paths:
845 for path in paths:
841 folded = self._discoverpath(path, norm, True, None,
846 folded = self._discoverpath(path, norm, True, None,
842 self._dirfoldmap)
847 self._dirfoldmap)
843 if path != folded:
848 if path != folded:
844 results[path] = None
849 results[path] = None
845
850
846 return results, dirsfound, dirsnotfound
851 return results, dirsfound, dirsnotfound
847
852
848 def walk(self, match, subrepos, unknown, ignored, full=True):
853 def walk(self, match, subrepos, unknown, ignored, full=True):
849 '''
854 '''
850 Walk recursively through the directory tree, finding all files
855 Walk recursively through the directory tree, finding all files
851 matched by match.
856 matched by match.
852
857
853 If full is False, maybe skip some known-clean files.
858 If full is False, maybe skip some known-clean files.
854
859
855 Return a dict mapping filename to stat-like object (either
860 Return a dict mapping filename to stat-like object (either
856 mercurial.osutil.stat instance or return value of os.stat()).
861 mercurial.osutil.stat instance or return value of os.stat()).
857
862
858 '''
863 '''
859 # full is a flag that extensions that hook into walk can use -- this
864 # full is a flag that extensions that hook into walk can use -- this
860 # implementation doesn't use it at all. This satisfies the contract
865 # implementation doesn't use it at all. This satisfies the contract
861 # because we only guarantee a "maybe".
866 # because we only guarantee a "maybe".
862
867
863 if ignored:
868 if ignored:
864 ignore = util.never
869 ignore = util.never
865 dirignore = util.never
870 dirignore = util.never
866 elif unknown:
871 elif unknown:
867 ignore = self._ignore
872 ignore = self._ignore
868 dirignore = self._dirignore
873 dirignore = self._dirignore
869 else:
874 else:
870 # if not unknown and not ignored, drop dir recursion and step 2
875 # if not unknown and not ignored, drop dir recursion and step 2
871 ignore = util.always
876 ignore = util.always
872 dirignore = util.always
877 dirignore = util.always
873
878
874 matchfn = match.matchfn
879 matchfn = match.matchfn
875 matchalways = match.always()
880 matchalways = match.always()
876 matchtdir = match.traversedir
881 matchtdir = match.traversedir
877 dmap = self._map
882 dmap = self._map
878 listdir = osutil.listdir
883 listdir = osutil.listdir
879 lstat = os.lstat
884 lstat = os.lstat
880 dirkind = stat.S_IFDIR
885 dirkind = stat.S_IFDIR
881 regkind = stat.S_IFREG
886 regkind = stat.S_IFREG
882 lnkkind = stat.S_IFLNK
887 lnkkind = stat.S_IFLNK
883 join = self._join
888 join = self._join
884
889
885 exact = skipstep3 = False
890 exact = skipstep3 = False
886 if match.isexact(): # match.exact
891 if match.isexact(): # match.exact
887 exact = True
892 exact = True
888 dirignore = util.always # skip step 2
893 dirignore = util.always # skip step 2
889 elif match.prefix(): # match.match, no patterns
894 elif match.prefix(): # match.match, no patterns
890 skipstep3 = True
895 skipstep3 = True
891
896
892 if not exact and self._checkcase:
897 if not exact and self._checkcase:
893 normalize = self._normalize
898 normalize = self._normalize
894 normalizefile = self._normalizefile
899 normalizefile = self._normalizefile
895 skipstep3 = False
900 skipstep3 = False
896 else:
901 else:
897 normalize = self._normalize
902 normalize = self._normalize
898 normalizefile = None
903 normalizefile = None
899
904
900 # step 1: find all explicit files
905 # step 1: find all explicit files
901 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
906 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
902
907
903 skipstep3 = skipstep3 and not (work or dirsnotfound)
908 skipstep3 = skipstep3 and not (work or dirsnotfound)
904 work = [d for d in work if not dirignore(d[0])]
909 work = [d for d in work if not dirignore(d[0])]
905
910
906 # step 2: visit subdirectories
911 # step 2: visit subdirectories
907 def traverse(work, alreadynormed):
912 def traverse(work, alreadynormed):
908 wadd = work.append
913 wadd = work.append
909 while work:
914 while work:
910 nd = work.pop()
915 nd = work.pop()
911 skip = None
916 skip = None
912 if nd == '.':
917 if nd == '.':
913 nd = ''
918 nd = ''
914 else:
919 else:
915 skip = '.hg'
920 skip = '.hg'
916 try:
921 try:
917 entries = listdir(join(nd), stat=True, skip=skip)
922 entries = listdir(join(nd), stat=True, skip=skip)
918 except OSError as inst:
923 except OSError as inst:
919 if inst.errno in (errno.EACCES, errno.ENOENT):
924 if inst.errno in (errno.EACCES, errno.ENOENT):
920 match.bad(self.pathto(nd), inst.strerror)
925 match.bad(self.pathto(nd), inst.strerror)
921 continue
926 continue
922 raise
927 raise
923 for f, kind, st in entries:
928 for f, kind, st in entries:
924 if normalizefile:
929 if normalizefile:
925 # even though f might be a directory, we're only
930 # even though f might be a directory, we're only
926 # interested in comparing it to files currently in the
931 # interested in comparing it to files currently in the
927 # dmap -- therefore normalizefile is enough
932 # dmap -- therefore normalizefile is enough
928 nf = normalizefile(nd and (nd + "/" + f) or f, True,
933 nf = normalizefile(nd and (nd + "/" + f) or f, True,
929 True)
934 True)
930 else:
935 else:
931 nf = nd and (nd + "/" + f) or f
936 nf = nd and (nd + "/" + f) or f
932 if nf not in results:
937 if nf not in results:
933 if kind == dirkind:
938 if kind == dirkind:
934 if not ignore(nf):
939 if not ignore(nf):
935 if matchtdir:
940 if matchtdir:
936 matchtdir(nf)
941 matchtdir(nf)
937 wadd(nf)
942 wadd(nf)
938 if nf in dmap and (matchalways or matchfn(nf)):
943 if nf in dmap and (matchalways or matchfn(nf)):
939 results[nf] = None
944 results[nf] = None
940 elif kind == regkind or kind == lnkkind:
945 elif kind == regkind or kind == lnkkind:
941 if nf in dmap:
946 if nf in dmap:
942 if matchalways or matchfn(nf):
947 if matchalways or matchfn(nf):
943 results[nf] = st
948 results[nf] = st
944 elif ((matchalways or matchfn(nf))
949 elif ((matchalways or matchfn(nf))
945 and not ignore(nf)):
950 and not ignore(nf)):
946 # unknown file -- normalize if necessary
951 # unknown file -- normalize if necessary
947 if not alreadynormed:
952 if not alreadynormed:
948 nf = normalize(nf, False, True)
953 nf = normalize(nf, False, True)
949 results[nf] = st
954 results[nf] = st
950 elif nf in dmap and (matchalways or matchfn(nf)):
955 elif nf in dmap and (matchalways or matchfn(nf)):
951 results[nf] = None
956 results[nf] = None
952
957
953 for nd, d in work:
958 for nd, d in work:
954 # alreadynormed means that processwork doesn't have to do any
959 # alreadynormed means that processwork doesn't have to do any
955 # expensive directory normalization
960 # expensive directory normalization
956 alreadynormed = not normalize or nd == d
961 alreadynormed = not normalize or nd == d
957 traverse([d], alreadynormed)
962 traverse([d], alreadynormed)
958
963
959 for s in subrepos:
964 for s in subrepos:
960 del results[s]
965 del results[s]
961 del results['.hg']
966 del results['.hg']
962
967
963 # step 3: visit remaining files from dmap
968 # step 3: visit remaining files from dmap
964 if not skipstep3 and not exact:
969 if not skipstep3 and not exact:
965 # If a dmap file is not in results yet, it was either
970 # If a dmap file is not in results yet, it was either
966 # a) not matching matchfn b) ignored, c) missing, or d) under a
971 # a) not matching matchfn b) ignored, c) missing, or d) under a
967 # symlink directory.
972 # symlink directory.
968 if not results and matchalways:
973 if not results and matchalways:
969 visit = dmap.keys()
974 visit = dmap.keys()
970 else:
975 else:
971 visit = [f for f in dmap if f not in results and matchfn(f)]
976 visit = [f for f in dmap if f not in results and matchfn(f)]
972 visit.sort()
977 visit.sort()
973
978
974 if unknown:
979 if unknown:
975 # unknown == True means we walked all dirs under the roots
980 # unknown == True means we walked all dirs under the roots
976 # that wasn't ignored, and everything that matched was stat'ed
981 # that wasn't ignored, and everything that matched was stat'ed
977 # and is already in results.
982 # and is already in results.
978 # The rest must thus be ignored or under a symlink.
983 # The rest must thus be ignored or under a symlink.
979 audit_path = pathutil.pathauditor(self._root)
984 audit_path = pathutil.pathauditor(self._root)
980
985
981 for nf in iter(visit):
986 for nf in iter(visit):
982 # If a stat for the same file was already added with a
987 # If a stat for the same file was already added with a
983 # different case, don't add one for this, since that would
988 # different case, don't add one for this, since that would
984 # make it appear as if the file exists under both names
989 # make it appear as if the file exists under both names
985 # on disk.
990 # on disk.
986 if (normalizefile and
991 if (normalizefile and
987 normalizefile(nf, True, True) in results):
992 normalizefile(nf, True, True) in results):
988 results[nf] = None
993 results[nf] = None
989 # Report ignored items in the dmap as long as they are not
994 # Report ignored items in the dmap as long as they are not
990 # under a symlink directory.
995 # under a symlink directory.
991 elif audit_path.check(nf):
996 elif audit_path.check(nf):
992 try:
997 try:
993 results[nf] = lstat(join(nf))
998 results[nf] = lstat(join(nf))
994 # file was just ignored, no links, and exists
999 # file was just ignored, no links, and exists
995 except OSError:
1000 except OSError:
996 # file doesn't exist
1001 # file doesn't exist
997 results[nf] = None
1002 results[nf] = None
998 else:
1003 else:
999 # It's either missing or under a symlink directory
1004 # It's either missing or under a symlink directory
1000 # which we in this case report as missing
1005 # which we in this case report as missing
1001 results[nf] = None
1006 results[nf] = None
1002 else:
1007 else:
1003 # We may not have walked the full directory tree above,
1008 # We may not have walked the full directory tree above,
1004 # so stat and check everything we missed.
1009 # so stat and check everything we missed.
1005 nf = iter(visit).next
1010 nf = iter(visit).next
1006 pos = 0
1011 pos = 0
1007 while pos < len(visit):
1012 while pos < len(visit):
1008 # visit in mid-sized batches so that we don't
1013 # visit in mid-sized batches so that we don't
1009 # block signals indefinitely
1014 # block signals indefinitely
1010 xr = xrange(pos, min(len(visit), pos + 1000))
1015 xr = xrange(pos, min(len(visit), pos + 1000))
1011 for st in util.statfiles([join(visit[n]) for n in xr]):
1016 for st in util.statfiles([join(visit[n]) for n in xr]):
1012 results[nf()] = st
1017 results[nf()] = st
1013 pos += 1000
1018 pos += 1000
1014 return results
1019 return results
1015
1020
1016 def status(self, match, subrepos, ignored, clean, unknown):
1021 def status(self, match, subrepos, ignored, clean, unknown):
1017 '''Determine the status of the working copy relative to the
1022 '''Determine the status of the working copy relative to the
1018 dirstate and return a pair of (unsure, status), where status is of type
1023 dirstate and return a pair of (unsure, status), where status is of type
1019 scmutil.status and:
1024 scmutil.status and:
1020
1025
1021 unsure:
1026 unsure:
1022 files that might have been modified since the dirstate was
1027 files that might have been modified since the dirstate was
1023 written, but need to be read to be sure (size is the same
1028 written, but need to be read to be sure (size is the same
1024 but mtime differs)
1029 but mtime differs)
1025 status.modified:
1030 status.modified:
1026 files that have definitely been modified since the dirstate
1031 files that have definitely been modified since the dirstate
1027 was written (different size or mode)
1032 was written (different size or mode)
1028 status.clean:
1033 status.clean:
1029 files that have definitely not been modified since the
1034 files that have definitely not been modified since the
1030 dirstate was written
1035 dirstate was written
1031 '''
1036 '''
1032 listignored, listclean, listunknown = ignored, clean, unknown
1037 listignored, listclean, listunknown = ignored, clean, unknown
1033 lookup, modified, added, unknown, ignored = [], [], [], [], []
1038 lookup, modified, added, unknown, ignored = [], [], [], [], []
1034 removed, deleted, clean = [], [], []
1039 removed, deleted, clean = [], [], []
1035
1040
1036 dmap = self._map
1041 dmap = self._map
1037 ladd = lookup.append # aka "unsure"
1042 ladd = lookup.append # aka "unsure"
1038 madd = modified.append
1043 madd = modified.append
1039 aadd = added.append
1044 aadd = added.append
1040 uadd = unknown.append
1045 uadd = unknown.append
1041 iadd = ignored.append
1046 iadd = ignored.append
1042 radd = removed.append
1047 radd = removed.append
1043 dadd = deleted.append
1048 dadd = deleted.append
1044 cadd = clean.append
1049 cadd = clean.append
1045 mexact = match.exact
1050 mexact = match.exact
1046 dirignore = self._dirignore
1051 dirignore = self._dirignore
1047 checkexec = self._checkexec
1052 checkexec = self._checkexec
1048 copymap = self._copymap
1053 copymap = self._copymap
1049 lastnormaltime = self._lastnormaltime
1054 lastnormaltime = self._lastnormaltime
1050
1055
1051 # We need to do full walks when either
1056 # We need to do full walks when either
1052 # - we're listing all clean files, or
1057 # - we're listing all clean files, or
1053 # - match.traversedir does something, because match.traversedir should
1058 # - match.traversedir does something, because match.traversedir should
1054 # be called for every dir in the working dir
1059 # be called for every dir in the working dir
1055 full = listclean or match.traversedir is not None
1060 full = listclean or match.traversedir is not None
1056 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1061 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1057 full=full).iteritems():
1062 full=full).iteritems():
1058 if fn not in dmap:
1063 if fn not in dmap:
1059 if (listignored or mexact(fn)) and dirignore(fn):
1064 if (listignored or mexact(fn)) and dirignore(fn):
1060 if listignored:
1065 if listignored:
1061 iadd(fn)
1066 iadd(fn)
1062 else:
1067 else:
1063 uadd(fn)
1068 uadd(fn)
1064 continue
1069 continue
1065
1070
1066 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1071 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1067 # written like that for performance reasons. dmap[fn] is not a
1072 # written like that for performance reasons. dmap[fn] is not a
1068 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1073 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1069 # opcode has fast paths when the value to be unpacked is a tuple or
1074 # opcode has fast paths when the value to be unpacked is a tuple or
1070 # a list, but falls back to creating a full-fledged iterator in
1075 # a list, but falls back to creating a full-fledged iterator in
1071 # general. That is much slower than simply accessing and storing the
1076 # general. That is much slower than simply accessing and storing the
1072 # tuple members one by one.
1077 # tuple members one by one.
1073 t = dmap[fn]
1078 t = dmap[fn]
1074 state = t[0]
1079 state = t[0]
1075 mode = t[1]
1080 mode = t[1]
1076 size = t[2]
1081 size = t[2]
1077 time = t[3]
1082 time = t[3]
1078
1083
1079 if not st and state in "nma":
1084 if not st and state in "nma":
1080 dadd(fn)
1085 dadd(fn)
1081 elif state == 'n':
1086 elif state == 'n':
1082 mtime = util.statmtimesec(st)
1087 mtime = util.statmtimesec(st)
1083 if (size >= 0 and
1088 if (size >= 0 and
1084 ((size != st.st_size and size != st.st_size & _rangemask)
1089 ((size != st.st_size and size != st.st_size & _rangemask)
1085 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1090 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1086 or size == -2 # other parent
1091 or size == -2 # other parent
1087 or fn in copymap):
1092 or fn in copymap):
1088 madd(fn)
1093 madd(fn)
1089 elif time != mtime and time != mtime & _rangemask:
1094 elif time != mtime and time != mtime & _rangemask:
1090 ladd(fn)
1095 ladd(fn)
1091 elif mtime == lastnormaltime:
1096 elif mtime == lastnormaltime:
1092 # fn may have just been marked as normal and it may have
1097 # fn may have just been marked as normal and it may have
1093 # changed in the same second without changing its size.
1098 # changed in the same second without changing its size.
1094 # This can happen if we quickly do multiple commits.
1099 # This can happen if we quickly do multiple commits.
1095 # Force lookup, so we don't miss such a racy file change.
1100 # Force lookup, so we don't miss such a racy file change.
1096 ladd(fn)
1101 ladd(fn)
1097 elif listclean:
1102 elif listclean:
1098 cadd(fn)
1103 cadd(fn)
1099 elif state == 'm':
1104 elif state == 'm':
1100 madd(fn)
1105 madd(fn)
1101 elif state == 'a':
1106 elif state == 'a':
1102 aadd(fn)
1107 aadd(fn)
1103 elif state == 'r':
1108 elif state == 'r':
1104 radd(fn)
1109 radd(fn)
1105
1110
1106 return (lookup, scmutil.status(modified, added, removed, deleted,
1111 return (lookup, scmutil.status(modified, added, removed, deleted,
1107 unknown, ignored, clean))
1112 unknown, ignored, clean))
1108
1113
def matches(self, match):
    '''
    return files in the dirstate (in whatever state) filtered by match
    '''
    tracked = self._map
    if match.always():
        # every tracked file matches; hand back the key view directly
        return tracked.keys()
    wanted = match.files()
    if match.isexact():
        # fast path -- intersect the (typically small) pattern list with
        # the map instead of scanning the whole map
        return [name for name in wanted if name in tracked]
    if match.prefix() and all(name in tracked for name in wanted):
        # fast path -- every pattern root is itself a tracked file, so
        # the answer is exactly the pattern list
        return list(wanted)
    # slow path -- test every tracked file against the matcher
    return [name for name in tracked if match(name)]
1126
1131
1127 def _actualfilename(self, tr):
1132 def _actualfilename(self, tr):
1128 if tr:
1133 if tr:
1129 return self._pendingfilename
1134 return self._pendingfilename
1130 else:
1135 else:
1131 return self._filename
1136 return self._filename
1132
1137
1133 def _savebackup(self, tr, suffix):
1138 def _savebackup(self, tr, suffix):
1134 '''Save current dirstate into backup file with suffix'''
1139 '''Save current dirstate into backup file with suffix'''
1135 filename = self._actualfilename(tr)
1140 filename = self._actualfilename(tr)
1136
1141
1137 # use '_writedirstate' instead of 'write' to write changes certainly,
1142 # use '_writedirstate' instead of 'write' to write changes certainly,
1138 # because the latter omits writing out if transaction is running.
1143 # because the latter omits writing out if transaction is running.
1139 # output file will be used to create backup of dirstate at this point.
1144 # output file will be used to create backup of dirstate at this point.
1140 self._writedirstate(self._opener(filename, "w", atomictemp=True))
1145 self._writedirstate(self._opener(filename, "w", atomictemp=True))
1141
1146
1142 if tr:
1147 if tr:
1143 # ensure that subsequent tr.writepending returns True for
1148 # ensure that subsequent tr.writepending returns True for
1144 # changes written out above, even if dirstate is never
1149 # changes written out above, even if dirstate is never
1145 # changed after this
1150 # changed after this
1146 tr.addfilegenerator('dirstate', (self._filename,),
1151 tr.addfilegenerator('dirstate', (self._filename,),
1147 self._writedirstate, location='plain')
1152 self._writedirstate, location='plain')
1148
1153
1149 # ensure that pending file written above is unlinked at
1154 # ensure that pending file written above is unlinked at
1150 # failure, even if tr.writepending isn't invoked until the
1155 # failure, even if tr.writepending isn't invoked until the
1151 # end of this transaction
1156 # end of this transaction
1152 tr.registertmp(filename, location='plain')
1157 tr.registertmp(filename, location='plain')
1153
1158
1154 self._opener.write(filename + suffix, self._opener.tryread(filename))
1159 self._opener.write(filename + suffix, self._opener.tryread(filename))
1155
1160
1156 def _restorebackup(self, tr, suffix):
1161 def _restorebackup(self, tr, suffix):
1157 '''Restore dirstate by backup file with suffix'''
1162 '''Restore dirstate by backup file with suffix'''
1158 # this "invalidate()" prevents "wlock.release()" from writing
1163 # this "invalidate()" prevents "wlock.release()" from writing
1159 # changes of dirstate out after restoring from backup file
1164 # changes of dirstate out after restoring from backup file
1160 self.invalidate()
1165 self.invalidate()
1161 filename = self._actualfilename(tr)
1166 filename = self._actualfilename(tr)
1162 self._opener.rename(filename + suffix, filename)
1167 self._opener.rename(filename + suffix, filename)
1163
1168
1164 def _clearbackup(self, tr, suffix):
1169 def _clearbackup(self, tr, suffix):
1165 '''Clear backup file with suffix'''
1170 '''Clear backup file with suffix'''
1166 filename = self._actualfilename(tr)
1171 filename = self._actualfilename(tr)
1167 self._opener.unlink(filename + suffix)
1172 self._opener.unlink(filename + suffix)
General Comments 0
You need to be logged in to leave comments. Login now