dirstate: only invoke delaywrite if relevant...
Matt Mackall
r27398:c8167577 default
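The change in this revision makes the debug.dirstate.delaywrite sleep conditional: _writedirstate now scans the dirstate map first and only sleeps when some file is recorded as clean ('n') with an mtime equal to the current write's notion of "now". A minimal standalone sketch of that guard follows (illustration only; the helper name _delay_if_relevant is invented here, while dmap, delaywrite, and now mirror the code in the diff below):

    import time

    def _delay_if_relevant(dmap, delaywrite, now):
        # Hypothetical helper for illustration only, not part of dirstate.py.
        # dmap maps filename -> (state, mode, size, mtime); 'n' means normal/clean.
        # Sleep only when some entry was recorded as clean in the current
        # timeslot ('now'), i.e. only when the ambiguous-mtime race is possible.
        if delaywrite > 0:
            for f, e in dmap.iteritems():  # Python 2 dict iteration, as in dirstate.py
                if e[0] == 'n' and e[3] == now:
                    time.sleep(delaywrite)
                    break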
@@ -1,1171 +1,1175 @@
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid
8 from node import nullid
9 from i18n import _
9 from i18n import _
10 import scmutil, util, osutil, parsers, encoding, pathutil, error
10 import scmutil, util, osutil, parsers, encoding, pathutil, error
11 import os, stat, errno
11 import os, stat, errno
12 import match as matchmod
12 import match as matchmod
13
13
14 propertycache = util.propertycache
14 propertycache = util.propertycache
15 filecache = scmutil.filecache
15 filecache = scmutil.filecache
16 _rangemask = 0x7fffffff
16 _rangemask = 0x7fffffff
17
17
18 dirstatetuple = parsers.dirstatetuple
18 dirstatetuple = parsers.dirstatetuple
19
19
20 class repocache(filecache):
20 class repocache(filecache):
21 """filecache for files in .hg/"""
21 """filecache for files in .hg/"""
22 def join(self, obj, fname):
22 def join(self, obj, fname):
23 return obj._opener.join(fname)
23 return obj._opener.join(fname)
24
24
25 class rootcache(filecache):
25 class rootcache(filecache):
26 """filecache for files in the repository root"""
26 """filecache for files in the repository root"""
27 def join(self, obj, fname):
27 def join(self, obj, fname):
28 return obj._join(fname)
28 return obj._join(fname)
29
29
30 def _getfsnow(vfs):
30 def _getfsnow(vfs):
31 '''Get "now" timestamp on filesystem'''
31 '''Get "now" timestamp on filesystem'''
32 tmpfd, tmpname = vfs.mkstemp()
32 tmpfd, tmpname = vfs.mkstemp()
33 try:
33 try:
34 return os.fstat(tmpfd).st_mtime
34 return os.fstat(tmpfd).st_mtime
35 finally:
35 finally:
36 os.close(tmpfd)
36 os.close(tmpfd)
37 vfs.unlink(tmpname)
37 vfs.unlink(tmpname)
38
38
39 def _trypending(root, vfs, filename):
39 def _trypending(root, vfs, filename):
40 '''Open file to be read according to HG_PENDING environment variable
40 '''Open file to be read according to HG_PENDING environment variable
41
41
42 This opens the '.pending' version of the specified 'filename' only
42 This opens the '.pending' version of the specified 'filename' only
43 when HG_PENDING is equal to 'root'.
43 when HG_PENDING is equal to 'root'.
44
44
45 This returns '(fp, is_pending_opened)' tuple.
45 This returns '(fp, is_pending_opened)' tuple.
46 '''
46 '''
47 if root == os.environ.get('HG_PENDING'):
47 if root == os.environ.get('HG_PENDING'):
48 try:
48 try:
49 return (vfs('%s.pending' % filename), True)
49 return (vfs('%s.pending' % filename), True)
50 except IOError as inst:
50 except IOError as inst:
51 if inst.errno != errno.ENOENT:
51 if inst.errno != errno.ENOENT:
52 raise
52 raise
53 return (vfs(filename), False)
53 return (vfs(filename), False)
54
54
55 class dirstate(object):
55 class dirstate(object):
56
56
57 def __init__(self, opener, ui, root, validate):
57 def __init__(self, opener, ui, root, validate):
58 '''Create a new dirstate object.
58 '''Create a new dirstate object.
59
59
60 opener is an open()-like callable that can be used to open the
60 opener is an open()-like callable that can be used to open the
61 dirstate file; root is the root of the directory tracked by
61 dirstate file; root is the root of the directory tracked by
62 the dirstate.
62 the dirstate.
63 '''
63 '''
64 self._opener = opener
64 self._opener = opener
65 self._validate = validate
65 self._validate = validate
66 self._root = root
66 self._root = root
67 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
67 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
68 # UNC path pointing to root share (issue4557)
68 # UNC path pointing to root share (issue4557)
69 self._rootdir = pathutil.normasprefix(root)
69 self._rootdir = pathutil.normasprefix(root)
70 # internal config: ui.forcecwd
70 # internal config: ui.forcecwd
71 forcecwd = ui.config('ui', 'forcecwd')
71 forcecwd = ui.config('ui', 'forcecwd')
72 if forcecwd:
72 if forcecwd:
73 self._cwd = forcecwd
73 self._cwd = forcecwd
74 self._dirty = False
74 self._dirty = False
75 self._dirtypl = False
75 self._dirtypl = False
76 self._lastnormaltime = 0
76 self._lastnormaltime = 0
77 self._ui = ui
77 self._ui = ui
78 self._filecache = {}
78 self._filecache = {}
79 self._parentwriters = 0
79 self._parentwriters = 0
80 self._filename = 'dirstate'
80 self._filename = 'dirstate'
81 self._pendingfilename = '%s.pending' % self._filename
81 self._pendingfilename = '%s.pending' % self._filename
82
82
83 # for consistent view between _pl() and _read() invocations
83 # for consistent view between _pl() and _read() invocations
84 self._pendingmode = None
84 self._pendingmode = None
85
85
86 def beginparentchange(self):
86 def beginparentchange(self):
87 '''Marks the beginning of a set of changes that involve changing
87 '''Marks the beginning of a set of changes that involve changing
88 the dirstate parents. If there is an exception during this time,
88 the dirstate parents. If there is an exception during this time,
89 the dirstate will not be written when the wlock is released. This
89 the dirstate will not be written when the wlock is released. This
90 prevents writing an incoherent dirstate where the parent doesn't
90 prevents writing an incoherent dirstate where the parent doesn't
91 match the contents.
91 match the contents.
92 '''
92 '''
93 self._parentwriters += 1
93 self._parentwriters += 1
94
94
95 def endparentchange(self):
95 def endparentchange(self):
96 '''Marks the end of a set of changes that involve changing the
96 '''Marks the end of a set of changes that involve changing the
97 dirstate parents. Once all parent changes have been marked done,
97 dirstate parents. Once all parent changes have been marked done,
98 the wlock will be free to write the dirstate on release.
98 the wlock will be free to write the dirstate on release.
99 '''
99 '''
100 if self._parentwriters > 0:
100 if self._parentwriters > 0:
101 self._parentwriters -= 1
101 self._parentwriters -= 1
102
102
103 def pendingparentchange(self):
103 def pendingparentchange(self):
104 '''Returns true if the dirstate is in the middle of a set of changes
104 '''Returns true if the dirstate is in the middle of a set of changes
105 that modify the dirstate parent.
105 that modify the dirstate parent.
106 '''
106 '''
107 return self._parentwriters > 0
107 return self._parentwriters > 0
108
108
109 @propertycache
109 @propertycache
110 def _map(self):
110 def _map(self):
111 '''Return the dirstate contents as a map from filename to
111 '''Return the dirstate contents as a map from filename to
112 (state, mode, size, time).'''
112 (state, mode, size, time).'''
113 self._read()
113 self._read()
114 return self._map
114 return self._map
115
115
116 @propertycache
116 @propertycache
117 def _copymap(self):
117 def _copymap(self):
118 self._read()
118 self._read()
119 return self._copymap
119 return self._copymap
120
120
121 @propertycache
121 @propertycache
122 def _filefoldmap(self):
122 def _filefoldmap(self):
123 try:
123 try:
124 makefilefoldmap = parsers.make_file_foldmap
124 makefilefoldmap = parsers.make_file_foldmap
125 except AttributeError:
125 except AttributeError:
126 pass
126 pass
127 else:
127 else:
128 return makefilefoldmap(self._map, util.normcasespec,
128 return makefilefoldmap(self._map, util.normcasespec,
129 util.normcasefallback)
129 util.normcasefallback)
130
130
131 f = {}
131 f = {}
132 normcase = util.normcase
132 normcase = util.normcase
133 for name, s in self._map.iteritems():
133 for name, s in self._map.iteritems():
134 if s[0] != 'r':
134 if s[0] != 'r':
135 f[normcase(name)] = name
135 f[normcase(name)] = name
136 f['.'] = '.' # prevents useless util.fspath() invocation
136 f['.'] = '.' # prevents useless util.fspath() invocation
137 return f
137 return f
138
138
139 @propertycache
139 @propertycache
140 def _dirfoldmap(self):
140 def _dirfoldmap(self):
141 f = {}
141 f = {}
142 normcase = util.normcase
142 normcase = util.normcase
143 for name in self._dirs:
143 for name in self._dirs:
144 f[normcase(name)] = name
144 f[normcase(name)] = name
145 return f
145 return f
146
146
147 @repocache('branch')
147 @repocache('branch')
148 def _branch(self):
148 def _branch(self):
149 try:
149 try:
150 return self._opener.read("branch").strip() or "default"
150 return self._opener.read("branch").strip() or "default"
151 except IOError as inst:
151 except IOError as inst:
152 if inst.errno != errno.ENOENT:
152 if inst.errno != errno.ENOENT:
153 raise
153 raise
154 return "default"
154 return "default"
155
155
156 @propertycache
156 @propertycache
157 def _pl(self):
157 def _pl(self):
158 try:
158 try:
159 fp = self._opendirstatefile()
159 fp = self._opendirstatefile()
160 st = fp.read(40)
160 st = fp.read(40)
161 fp.close()
161 fp.close()
162 l = len(st)
162 l = len(st)
163 if l == 40:
163 if l == 40:
164 return st[:20], st[20:40]
164 return st[:20], st[20:40]
165 elif l > 0 and l < 40:
165 elif l > 0 and l < 40:
166 raise error.Abort(_('working directory state appears damaged!'))
166 raise error.Abort(_('working directory state appears damaged!'))
167 except IOError as err:
167 except IOError as err:
168 if err.errno != errno.ENOENT:
168 if err.errno != errno.ENOENT:
169 raise
169 raise
170 return [nullid, nullid]
170 return [nullid, nullid]
171
171
172 @propertycache
172 @propertycache
173 def _dirs(self):
173 def _dirs(self):
174 return util.dirs(self._map, 'r')
174 return util.dirs(self._map, 'r')
175
175
176 def dirs(self):
176 def dirs(self):
177 return self._dirs
177 return self._dirs
178
178
179 @rootcache('.hgignore')
179 @rootcache('.hgignore')
180 def _ignore(self):
180 def _ignore(self):
181 files = []
181 files = []
182 if os.path.exists(self._join('.hgignore')):
182 if os.path.exists(self._join('.hgignore')):
183 files.append(self._join('.hgignore'))
183 files.append(self._join('.hgignore'))
184 for name, path in self._ui.configitems("ui"):
184 for name, path in self._ui.configitems("ui"):
185 if name == 'ignore' or name.startswith('ignore.'):
185 if name == 'ignore' or name.startswith('ignore.'):
186 # we need to use os.path.join here rather than self._join
186 # we need to use os.path.join here rather than self._join
187 # because path is arbitrary and user-specified
187 # because path is arbitrary and user-specified
188 files.append(os.path.join(self._rootdir, util.expandpath(path)))
188 files.append(os.path.join(self._rootdir, util.expandpath(path)))
189
189
190 if not files:
190 if not files:
191 return util.never
191 return util.never
192
192
193 pats = ['include:%s' % f for f in files]
193 pats = ['include:%s' % f for f in files]
194 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
194 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
195
195
196 @propertycache
196 @propertycache
197 def _slash(self):
197 def _slash(self):
198 return self._ui.configbool('ui', 'slash') and os.sep != '/'
198 return self._ui.configbool('ui', 'slash') and os.sep != '/'
199
199
200 @propertycache
200 @propertycache
201 def _checklink(self):
201 def _checklink(self):
202 return util.checklink(self._root)
202 return util.checklink(self._root)
203
203
204 @propertycache
204 @propertycache
205 def _checkexec(self):
205 def _checkexec(self):
206 return util.checkexec(self._root)
206 return util.checkexec(self._root)
207
207
208 @propertycache
208 @propertycache
209 def _checkcase(self):
209 def _checkcase(self):
210 return not util.checkcase(self._join('.hg'))
210 return not util.checkcase(self._join('.hg'))
211
211
212 def _join(self, f):
212 def _join(self, f):
213 # much faster than os.path.join()
213 # much faster than os.path.join()
214 # it's safe because f is always a relative path
214 # it's safe because f is always a relative path
215 return self._rootdir + f
215 return self._rootdir + f
216
216
217 def flagfunc(self, buildfallback):
217 def flagfunc(self, buildfallback):
218 if self._checklink and self._checkexec:
218 if self._checklink and self._checkexec:
219 def f(x):
219 def f(x):
220 try:
220 try:
221 st = os.lstat(self._join(x))
221 st = os.lstat(self._join(x))
222 if util.statislink(st):
222 if util.statislink(st):
223 return 'l'
223 return 'l'
224 if util.statisexec(st):
224 if util.statisexec(st):
225 return 'x'
225 return 'x'
226 except OSError:
226 except OSError:
227 pass
227 pass
228 return ''
228 return ''
229 return f
229 return f
230
230
231 fallback = buildfallback()
231 fallback = buildfallback()
232 if self._checklink:
232 if self._checklink:
233 def f(x):
233 def f(x):
234 if os.path.islink(self._join(x)):
234 if os.path.islink(self._join(x)):
235 return 'l'
235 return 'l'
236 if 'x' in fallback(x):
236 if 'x' in fallback(x):
237 return 'x'
237 return 'x'
238 return ''
238 return ''
239 return f
239 return f
240 if self._checkexec:
240 if self._checkexec:
241 def f(x):
241 def f(x):
242 if 'l' in fallback(x):
242 if 'l' in fallback(x):
243 return 'l'
243 return 'l'
244 if util.isexec(self._join(x)):
244 if util.isexec(self._join(x)):
245 return 'x'
245 return 'x'
246 return ''
246 return ''
247 return f
247 return f
248 else:
248 else:
249 return fallback
249 return fallback
250
250
251 @propertycache
251 @propertycache
252 def _cwd(self):
252 def _cwd(self):
253 return os.getcwd()
253 return os.getcwd()
254
254
255 def getcwd(self):
255 def getcwd(self):
256 '''Return the path from which a canonical path is calculated.
256 '''Return the path from which a canonical path is calculated.
257
257
258 This path should be used to resolve file patterns or to convert
258 This path should be used to resolve file patterns or to convert
259 canonical paths back to file paths for display. It shouldn't be
259 canonical paths back to file paths for display. It shouldn't be
260 used to get real file paths. Use vfs functions instead.
260 used to get real file paths. Use vfs functions instead.
261 '''
261 '''
262 cwd = self._cwd
262 cwd = self._cwd
263 if cwd == self._root:
263 if cwd == self._root:
264 return ''
264 return ''
265 # self._root ends with a path separator if self._root is '/' or 'C:\'
265 # self._root ends with a path separator if self._root is '/' or 'C:\'
266 rootsep = self._root
266 rootsep = self._root
267 if not util.endswithsep(rootsep):
267 if not util.endswithsep(rootsep):
268 rootsep += os.sep
268 rootsep += os.sep
269 if cwd.startswith(rootsep):
269 if cwd.startswith(rootsep):
270 return cwd[len(rootsep):]
270 return cwd[len(rootsep):]
271 else:
271 else:
272 # we're outside the repo. return an absolute path.
272 # we're outside the repo. return an absolute path.
273 return cwd
273 return cwd
274
274
275 def pathto(self, f, cwd=None):
275 def pathto(self, f, cwd=None):
276 if cwd is None:
276 if cwd is None:
277 cwd = self.getcwd()
277 cwd = self.getcwd()
278 path = util.pathto(self._root, cwd, f)
278 path = util.pathto(self._root, cwd, f)
279 if self._slash:
279 if self._slash:
280 return util.pconvert(path)
280 return util.pconvert(path)
281 return path
281 return path
282
282
283 def __getitem__(self, key):
283 def __getitem__(self, key):
284 '''Return the current state of key (a filename) in the dirstate.
284 '''Return the current state of key (a filename) in the dirstate.
285
285
286 States are:
286 States are:
287 n normal
287 n normal
288 m needs merging
288 m needs merging
289 r marked for removal
289 r marked for removal
290 a marked for addition
290 a marked for addition
291 ? not tracked
291 ? not tracked
292 '''
292 '''
293 return self._map.get(key, ("?",))[0]
293 return self._map.get(key, ("?",))[0]
294
294
295 def __contains__(self, key):
295 def __contains__(self, key):
296 return key in self._map
296 return key in self._map
297
297
298 def __iter__(self):
298 def __iter__(self):
299 for x in sorted(self._map):
299 for x in sorted(self._map):
300 yield x
300 yield x
301
301
302 def iteritems(self):
302 def iteritems(self):
303 return self._map.iteritems()
303 return self._map.iteritems()
304
304
305 def parents(self):
305 def parents(self):
306 return [self._validate(p) for p in self._pl]
306 return [self._validate(p) for p in self._pl]
307
307
308 def p1(self):
308 def p1(self):
309 return self._validate(self._pl[0])
309 return self._validate(self._pl[0])
310
310
311 def p2(self):
311 def p2(self):
312 return self._validate(self._pl[1])
312 return self._validate(self._pl[1])
313
313
314 def branch(self):
314 def branch(self):
315 return encoding.tolocal(self._branch)
315 return encoding.tolocal(self._branch)
316
316
317 def setparents(self, p1, p2=nullid):
317 def setparents(self, p1, p2=nullid):
318 """Set dirstate parents to p1 and p2.
318 """Set dirstate parents to p1 and p2.
319
319
320 When moving from two parents to one, 'm' merged entries are
320 When moving from two parents to one, 'm' merged entries are
321 adjusted to normal, and previous copy records are discarded and
321 adjusted to normal, and previous copy records are discarded and
322 returned by the call.
322 returned by the call.
323
323
324 See localrepo.setparents()
324 See localrepo.setparents()
325 """
325 """
326 if self._parentwriters == 0:
326 if self._parentwriters == 0:
327 raise ValueError("cannot set dirstate parent without "
327 raise ValueError("cannot set dirstate parent without "
328 "calling dirstate.beginparentchange")
328 "calling dirstate.beginparentchange")
329
329
330 self._dirty = self._dirtypl = True
330 self._dirty = self._dirtypl = True
331 oldp2 = self._pl[1]
331 oldp2 = self._pl[1]
332 self._pl = p1, p2
332 self._pl = p1, p2
333 copies = {}
333 copies = {}
334 if oldp2 != nullid and p2 == nullid:
334 if oldp2 != nullid and p2 == nullid:
335 for f, s in self._map.iteritems():
335 for f, s in self._map.iteritems():
336 # Discard 'm' markers when moving away from a merge state
336 # Discard 'm' markers when moving away from a merge state
337 if s[0] == 'm':
337 if s[0] == 'm':
338 if f in self._copymap:
338 if f in self._copymap:
339 copies[f] = self._copymap[f]
339 copies[f] = self._copymap[f]
340 self.normallookup(f)
340 self.normallookup(f)
341 # Also fix up otherparent markers
341 # Also fix up otherparent markers
342 elif s[0] == 'n' and s[2] == -2:
342 elif s[0] == 'n' and s[2] == -2:
343 if f in self._copymap:
343 if f in self._copymap:
344 copies[f] = self._copymap[f]
344 copies[f] = self._copymap[f]
345 self.add(f)
345 self.add(f)
346 return copies
346 return copies
347
347
348 def setbranch(self, branch):
348 def setbranch(self, branch):
349 self._branch = encoding.fromlocal(branch)
349 self._branch = encoding.fromlocal(branch)
350 f = self._opener('branch', 'w', atomictemp=True)
350 f = self._opener('branch', 'w', atomictemp=True)
351 try:
351 try:
352 f.write(self._branch + '\n')
352 f.write(self._branch + '\n')
353 f.close()
353 f.close()
354
354
355 # make sure filecache has the correct stat info for _branch after
355 # make sure filecache has the correct stat info for _branch after
356 # replacing the underlying file
356 # replacing the underlying file
357 ce = self._filecache['_branch']
357 ce = self._filecache['_branch']
358 if ce:
358 if ce:
359 ce.refresh()
359 ce.refresh()
360 except: # re-raises
360 except: # re-raises
361 f.discard()
361 f.discard()
362 raise
362 raise
363
363
364 def _opendirstatefile(self):
364 def _opendirstatefile(self):
365 fp, mode = _trypending(self._root, self._opener, self._filename)
365 fp, mode = _trypending(self._root, self._opener, self._filename)
366 if self._pendingmode is not None and self._pendingmode != mode:
366 if self._pendingmode is not None and self._pendingmode != mode:
367 fp.close()
367 fp.close()
368 raise error.Abort(_('working directory state may be '
368 raise error.Abort(_('working directory state may be '
369 'changed in parallel'))
369 'changed in parallel'))
370 self._pendingmode = mode
370 self._pendingmode = mode
371 return fp
371 return fp
372
372
373 def _read(self):
373 def _read(self):
374 self._map = {}
374 self._map = {}
375 self._copymap = {}
375 self._copymap = {}
376 try:
376 try:
377 fp = self._opendirstatefile()
377 fp = self._opendirstatefile()
378 try:
378 try:
379 st = fp.read()
379 st = fp.read()
380 finally:
380 finally:
381 fp.close()
381 fp.close()
382 except IOError as err:
382 except IOError as err:
383 if err.errno != errno.ENOENT:
383 if err.errno != errno.ENOENT:
384 raise
384 raise
385 return
385 return
386 if not st:
386 if not st:
387 return
387 return
388
388
389 if util.safehasattr(parsers, 'dict_new_presized'):
389 if util.safehasattr(parsers, 'dict_new_presized'):
390 # Make an estimate of the number of files in the dirstate based on
390 # Make an estimate of the number of files in the dirstate based on
391 # its size. From a linear regression on a set of real-world repos,
391 # its size. From a linear regression on a set of real-world repos,
392 # all over 10,000 files, the size of a dirstate entry is 85
392 # all over 10,000 files, the size of a dirstate entry is 85
393 # bytes. The cost of resizing is significantly higher than the cost
393 # bytes. The cost of resizing is significantly higher than the cost
394 # of filling in a larger presized dict, so subtract 20% from the
394 # of filling in a larger presized dict, so subtract 20% from the
395 # size.
395 # size.
396 #
396 #
397 # This heuristic is imperfect in many ways, so in a future dirstate
397 # This heuristic is imperfect in many ways, so in a future dirstate
398 # format update it makes sense to just record the number of entries
398 # format update it makes sense to just record the number of entries
399 # on write.
399 # on write.
400 self._map = parsers.dict_new_presized(len(st) / 71)
400 self._map = parsers.dict_new_presized(len(st) / 71)
401
401
402 # Python's garbage collector triggers a GC each time a certain number
402 # Python's garbage collector triggers a GC each time a certain number
403 # of container objects (the number being defined by
403 # of container objects (the number being defined by
404 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
404 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
405 # for each file in the dirstate. The C version then immediately marks
405 # for each file in the dirstate. The C version then immediately marks
406 # them as not to be tracked by the collector. However, this has no
406 # them as not to be tracked by the collector. However, this has no
407 # effect on when GCs are triggered, only on what objects the GC looks
407 # effect on when GCs are triggered, only on what objects the GC looks
408 # into. This means that O(number of files) GCs are unavoidable.
408 # into. This means that O(number of files) GCs are unavoidable.
409 # Depending on when in the process's lifetime the dirstate is parsed,
409 # Depending on when in the process's lifetime the dirstate is parsed,
410 # this can get very expensive. As a workaround, disable GC while
410 # this can get very expensive. As a workaround, disable GC while
411 # parsing the dirstate.
411 # parsing the dirstate.
412 #
412 #
413 # (we cannot decorate the function directly since it is in a C module)
413 # (we cannot decorate the function directly since it is in a C module)
414 parse_dirstate = util.nogc(parsers.parse_dirstate)
414 parse_dirstate = util.nogc(parsers.parse_dirstate)
415 p = parse_dirstate(self._map, self._copymap, st)
415 p = parse_dirstate(self._map, self._copymap, st)
416 if not self._dirtypl:
416 if not self._dirtypl:
417 self._pl = p
417 self._pl = p
418
418
419 def invalidate(self):
419 def invalidate(self):
420 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
420 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
421 "_pl", "_dirs", "_ignore"):
421 "_pl", "_dirs", "_ignore"):
422 if a in self.__dict__:
422 if a in self.__dict__:
423 delattr(self, a)
423 delattr(self, a)
424 self._lastnormaltime = 0
424 self._lastnormaltime = 0
425 self._dirty = False
425 self._dirty = False
426 self._parentwriters = 0
426 self._parentwriters = 0
427
427
428 def copy(self, source, dest):
428 def copy(self, source, dest):
429 """Mark dest as a copy of source. Unmark dest if source is None."""
429 """Mark dest as a copy of source. Unmark dest if source is None."""
430 if source == dest:
430 if source == dest:
431 return
431 return
432 self._dirty = True
432 self._dirty = True
433 if source is not None:
433 if source is not None:
434 self._copymap[dest] = source
434 self._copymap[dest] = source
435 elif dest in self._copymap:
435 elif dest in self._copymap:
436 del self._copymap[dest]
436 del self._copymap[dest]
437
437
438 def copied(self, file):
438 def copied(self, file):
439 return self._copymap.get(file, None)
439 return self._copymap.get(file, None)
440
440
441 def copies(self):
441 def copies(self):
442 return self._copymap
442 return self._copymap
443
443
444 def _droppath(self, f):
444 def _droppath(self, f):
445 if self[f] not in "?r" and "_dirs" in self.__dict__:
445 if self[f] not in "?r" and "_dirs" in self.__dict__:
446 self._dirs.delpath(f)
446 self._dirs.delpath(f)
447
447
448 if "_filefoldmap" in self.__dict__:
448 if "_filefoldmap" in self.__dict__:
449 normed = util.normcase(f)
449 normed = util.normcase(f)
450 if normed in self._filefoldmap:
450 if normed in self._filefoldmap:
451 del self._filefoldmap[normed]
451 del self._filefoldmap[normed]
452
452
453 def _addpath(self, f, state, mode, size, mtime):
453 def _addpath(self, f, state, mode, size, mtime):
454 oldstate = self[f]
454 oldstate = self[f]
455 if state == 'a' or oldstate == 'r':
455 if state == 'a' or oldstate == 'r':
456 scmutil.checkfilename(f)
456 scmutil.checkfilename(f)
457 if f in self._dirs:
457 if f in self._dirs:
458 raise error.Abort(_('directory %r already in dirstate') % f)
458 raise error.Abort(_('directory %r already in dirstate') % f)
459 # shadows
459 # shadows
460 for d in util.finddirs(f):
460 for d in util.finddirs(f):
461 if d in self._dirs:
461 if d in self._dirs:
462 break
462 break
463 if d in self._map and self[d] != 'r':
463 if d in self._map and self[d] != 'r':
464 raise error.Abort(
464 raise error.Abort(
465 _('file %r in dirstate clashes with %r') % (d, f))
465 _('file %r in dirstate clashes with %r') % (d, f))
466 if oldstate in "?r" and "_dirs" in self.__dict__:
466 if oldstate in "?r" and "_dirs" in self.__dict__:
467 self._dirs.addpath(f)
467 self._dirs.addpath(f)
468 self._dirty = True
468 self._dirty = True
469 self._map[f] = dirstatetuple(state, mode, size, mtime)
469 self._map[f] = dirstatetuple(state, mode, size, mtime)
470
470
471 def normal(self, f):
471 def normal(self, f):
472 '''Mark a file normal and clean.'''
472 '''Mark a file normal and clean.'''
473 s = os.lstat(self._join(f))
473 s = os.lstat(self._join(f))
474 mtime = s.st_mtime
474 mtime = s.st_mtime
475 self._addpath(f, 'n', s.st_mode,
475 self._addpath(f, 'n', s.st_mode,
476 s.st_size & _rangemask, mtime & _rangemask)
476 s.st_size & _rangemask, mtime & _rangemask)
477 if f in self._copymap:
477 if f in self._copymap:
478 del self._copymap[f]
478 del self._copymap[f]
479 if mtime > self._lastnormaltime:
479 if mtime > self._lastnormaltime:
480 # Remember the most recent modification timeslot for status(),
480 # Remember the most recent modification timeslot for status(),
481 # to make sure we won't miss future size-preserving file content
481 # to make sure we won't miss future size-preserving file content
482 # modifications that happen within the same timeslot.
482 # modifications that happen within the same timeslot.
483 self._lastnormaltime = mtime
483 self._lastnormaltime = mtime
484
484
485 def normallookup(self, f):
485 def normallookup(self, f):
486 '''Mark a file normal, but possibly dirty.'''
486 '''Mark a file normal, but possibly dirty.'''
487 if self._pl[1] != nullid and f in self._map:
487 if self._pl[1] != nullid and f in self._map:
488 # if there is a merge going on and the file was either
488 # if there is a merge going on and the file was either
489 # in state 'm' (-1) or coming from other parent (-2) before
489 # in state 'm' (-1) or coming from other parent (-2) before
490 # being removed, restore that state.
490 # being removed, restore that state.
491 entry = self._map[f]
491 entry = self._map[f]
492 if entry[0] == 'r' and entry[2] in (-1, -2):
492 if entry[0] == 'r' and entry[2] in (-1, -2):
493 source = self._copymap.get(f)
493 source = self._copymap.get(f)
494 if entry[2] == -1:
494 if entry[2] == -1:
495 self.merge(f)
495 self.merge(f)
496 elif entry[2] == -2:
496 elif entry[2] == -2:
497 self.otherparent(f)
497 self.otherparent(f)
498 if source:
498 if source:
499 self.copy(source, f)
499 self.copy(source, f)
500 return
500 return
501 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
501 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
502 return
502 return
503 self._addpath(f, 'n', 0, -1, -1)
503 self._addpath(f, 'n', 0, -1, -1)
504 if f in self._copymap:
504 if f in self._copymap:
505 del self._copymap[f]
505 del self._copymap[f]
506
506
507 def otherparent(self, f):
507 def otherparent(self, f):
508 '''Mark as coming from the other parent, always dirty.'''
508 '''Mark as coming from the other parent, always dirty.'''
509 if self._pl[1] == nullid:
509 if self._pl[1] == nullid:
510 raise error.Abort(_("setting %r to other parent "
510 raise error.Abort(_("setting %r to other parent "
511 "only allowed in merges") % f)
511 "only allowed in merges") % f)
512 if f in self and self[f] == 'n':
512 if f in self and self[f] == 'n':
513 # merge-like
513 # merge-like
514 self._addpath(f, 'm', 0, -2, -1)
514 self._addpath(f, 'm', 0, -2, -1)
515 else:
515 else:
516 # add-like
516 # add-like
517 self._addpath(f, 'n', 0, -2, -1)
517 self._addpath(f, 'n', 0, -2, -1)
518
518
519 if f in self._copymap:
519 if f in self._copymap:
520 del self._copymap[f]
520 del self._copymap[f]
521
521
522 def add(self, f):
522 def add(self, f):
523 '''Mark a file added.'''
523 '''Mark a file added.'''
524 self._addpath(f, 'a', 0, -1, -1)
524 self._addpath(f, 'a', 0, -1, -1)
525 if f in self._copymap:
525 if f in self._copymap:
526 del self._copymap[f]
526 del self._copymap[f]
527
527
528 def remove(self, f):
528 def remove(self, f):
529 '''Mark a file removed.'''
529 '''Mark a file removed.'''
530 self._dirty = True
530 self._dirty = True
531 self._droppath(f)
531 self._droppath(f)
532 size = 0
532 size = 0
533 if self._pl[1] != nullid and f in self._map:
533 if self._pl[1] != nullid and f in self._map:
534 # backup the previous state
534 # backup the previous state
535 entry = self._map[f]
535 entry = self._map[f]
536 if entry[0] == 'm': # merge
536 if entry[0] == 'm': # merge
537 size = -1
537 size = -1
538 elif entry[0] == 'n' and entry[2] == -2: # other parent
538 elif entry[0] == 'n' and entry[2] == -2: # other parent
539 size = -2
539 size = -2
540 self._map[f] = dirstatetuple('r', 0, size, 0)
540 self._map[f] = dirstatetuple('r', 0, size, 0)
541 if size == 0 and f in self._copymap:
541 if size == 0 and f in self._copymap:
542 del self._copymap[f]
542 del self._copymap[f]
543
543
544 def merge(self, f):
544 def merge(self, f):
545 '''Mark a file merged.'''
545 '''Mark a file merged.'''
546 if self._pl[1] == nullid:
546 if self._pl[1] == nullid:
547 return self.normallookup(f)
547 return self.normallookup(f)
548 return self.otherparent(f)
548 return self.otherparent(f)
549
549
550 def drop(self, f):
550 def drop(self, f):
551 '''Drop a file from the dirstate'''
551 '''Drop a file from the dirstate'''
552 if f in self._map:
552 if f in self._map:
553 self._dirty = True
553 self._dirty = True
554 self._droppath(f)
554 self._droppath(f)
555 del self._map[f]
555 del self._map[f]
556
556
557 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
557 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
558 if exists is None:
558 if exists is None:
559 exists = os.path.lexists(os.path.join(self._root, path))
559 exists = os.path.lexists(os.path.join(self._root, path))
560 if not exists:
560 if not exists:
561 # Maybe a path component exists
561 # Maybe a path component exists
562 if not ignoremissing and '/' in path:
562 if not ignoremissing and '/' in path:
563 d, f = path.rsplit('/', 1)
563 d, f = path.rsplit('/', 1)
564 d = self._normalize(d, False, ignoremissing, None)
564 d = self._normalize(d, False, ignoremissing, None)
565 folded = d + "/" + f
565 folded = d + "/" + f
566 else:
566 else:
567 # No path components, preserve original case
567 # No path components, preserve original case
568 folded = path
568 folded = path
569 else:
569 else:
570 # recursively normalize leading directory components
570 # recursively normalize leading directory components
571 # against dirstate
571 # against dirstate
572 if '/' in normed:
572 if '/' in normed:
573 d, f = normed.rsplit('/', 1)
573 d, f = normed.rsplit('/', 1)
574 d = self._normalize(d, False, ignoremissing, True)
574 d = self._normalize(d, False, ignoremissing, True)
575 r = self._root + "/" + d
575 r = self._root + "/" + d
576 folded = d + "/" + util.fspath(f, r)
576 folded = d + "/" + util.fspath(f, r)
577 else:
577 else:
578 folded = util.fspath(normed, self._root)
578 folded = util.fspath(normed, self._root)
579 storemap[normed] = folded
579 storemap[normed] = folded
580
580
581 return folded
581 return folded
582
582
583 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
583 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
584 normed = util.normcase(path)
584 normed = util.normcase(path)
585 folded = self._filefoldmap.get(normed, None)
585 folded = self._filefoldmap.get(normed, None)
586 if folded is None:
586 if folded is None:
587 if isknown:
587 if isknown:
588 folded = path
588 folded = path
589 else:
589 else:
590 folded = self._discoverpath(path, normed, ignoremissing, exists,
590 folded = self._discoverpath(path, normed, ignoremissing, exists,
591 self._filefoldmap)
591 self._filefoldmap)
592 return folded
592 return folded
593
593
594 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
594 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
595 normed = util.normcase(path)
595 normed = util.normcase(path)
596 folded = self._filefoldmap.get(normed, None)
596 folded = self._filefoldmap.get(normed, None)
597 if folded is None:
597 if folded is None:
598 folded = self._dirfoldmap.get(normed, None)
598 folded = self._dirfoldmap.get(normed, None)
599 if folded is None:
599 if folded is None:
600 if isknown:
600 if isknown:
601 folded = path
601 folded = path
602 else:
602 else:
603 # store discovered result in dirfoldmap so that future
603 # store discovered result in dirfoldmap so that future
604 # normalizefile calls don't start matching directories
604 # normalizefile calls don't start matching directories
605 folded = self._discoverpath(path, normed, ignoremissing, exists,
605 folded = self._discoverpath(path, normed, ignoremissing, exists,
606 self._dirfoldmap)
606 self._dirfoldmap)
607 return folded
607 return folded
608
608
609 def normalize(self, path, isknown=False, ignoremissing=False):
609 def normalize(self, path, isknown=False, ignoremissing=False):
610 '''
610 '''
611 normalize the case of a pathname when on a casefolding filesystem
611 normalize the case of a pathname when on a casefolding filesystem
612
612
613 isknown specifies whether the filename came from walking the
613 isknown specifies whether the filename came from walking the
614 disk, to avoid extra filesystem access.
614 disk, to avoid extra filesystem access.
615
615
616 If ignoremissing is True, missing paths are returned
616 If ignoremissing is True, missing paths are returned
617 unchanged. Otherwise, we try harder to normalize possibly
617 unchanged. Otherwise, we try harder to normalize possibly
618 existing path components.
618 existing path components.
619
619
620 The normalized case is determined based on the following precedence:
620 The normalized case is determined based on the following precedence:
621
621
622 - version of name already stored in the dirstate
622 - version of name already stored in the dirstate
623 - version of name stored on disk
623 - version of name stored on disk
624 - version provided via command arguments
624 - version provided via command arguments
625 '''
625 '''
626
626
627 if self._checkcase:
627 if self._checkcase:
628 return self._normalize(path, isknown, ignoremissing)
628 return self._normalize(path, isknown, ignoremissing)
629 return path
629 return path
630
630
631 def clear(self):
631 def clear(self):
632 self._map = {}
632 self._map = {}
633 if "_dirs" in self.__dict__:
633 if "_dirs" in self.__dict__:
634 delattr(self, "_dirs")
634 delattr(self, "_dirs")
635 self._copymap = {}
635 self._copymap = {}
636 self._pl = [nullid, nullid]
636 self._pl = [nullid, nullid]
637 self._lastnormaltime = 0
637 self._lastnormaltime = 0
638 self._dirty = True
638 self._dirty = True
639
639
640 def rebuild(self, parent, allfiles, changedfiles=None):
640 def rebuild(self, parent, allfiles, changedfiles=None):
641 if changedfiles is None:
641 if changedfiles is None:
642 # Rebuild entire dirstate
642 # Rebuild entire dirstate
643 changedfiles = allfiles
643 changedfiles = allfiles
644 lastnormaltime = self._lastnormaltime
644 lastnormaltime = self._lastnormaltime
645 self.clear()
645 self.clear()
646 self._lastnormaltime = lastnormaltime
646 self._lastnormaltime = lastnormaltime
647
647
648 for f in changedfiles:
648 for f in changedfiles:
649 mode = 0o666
649 mode = 0o666
650 if f in allfiles and 'x' in allfiles.flags(f):
650 if f in allfiles and 'x' in allfiles.flags(f):
651 mode = 0o777
651 mode = 0o777
652
652
653 if f in allfiles:
653 if f in allfiles:
654 self._map[f] = dirstatetuple('n', mode, -1, 0)
654 self._map[f] = dirstatetuple('n', mode, -1, 0)
655 else:
655 else:
656 self._map.pop(f, None)
656 self._map.pop(f, None)
657
657
658 self._pl = (parent, nullid)
658 self._pl = (parent, nullid)
659 self._dirty = True
659 self._dirty = True
660
660
661 def write(self, tr=False):
661 def write(self, tr=False):
662 if not self._dirty:
662 if not self._dirty:
663 return
663 return
664
664
665 filename = self._filename
665 filename = self._filename
666 if tr is False: # not explicitly specified
666 if tr is False: # not explicitly specified
667 if (self._ui.configbool('devel', 'all-warnings')
667 if (self._ui.configbool('devel', 'all-warnings')
668 or self._ui.configbool('devel', 'check-dirstate-write')):
668 or self._ui.configbool('devel', 'check-dirstate-write')):
669 self._ui.develwarn('use dirstate.write with '
669 self._ui.develwarn('use dirstate.write with '
670 'repo.currenttransaction()')
670 'repo.currenttransaction()')
671
671
672 if self._opener.lexists(self._pendingfilename):
672 if self._opener.lexists(self._pendingfilename):
673 # if pending file already exists, in-memory changes
673 # if pending file already exists, in-memory changes
674 # should be written into it, because it has priority
674 # should be written into it, because it has priority
675 # to '.hg/dirstate' at reading under HG_PENDING mode
675 # to '.hg/dirstate' at reading under HG_PENDING mode
676 filename = self._pendingfilename
676 filename = self._pendingfilename
677 elif tr:
677 elif tr:
678 # 'dirstate.write()' is not only for writing in-memory
678 # 'dirstate.write()' is not only for writing in-memory
679 # changes out, but also for dropping ambiguous timestamp.
679 # changes out, but also for dropping ambiguous timestamp.
680 # delayed writing re-raises the "ambiguous timestamp issue".
680 # delayed writing re-raises the "ambiguous timestamp issue".
681 # See also the wiki page below for detail:
681 # See also the wiki page below for detail:
682 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
682 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
683
683
684 # emulate dropping timestamp in 'parsers.pack_dirstate'
684 # emulate dropping timestamp in 'parsers.pack_dirstate'
685 now = _getfsnow(self._opener)
685 now = _getfsnow(self._opener)
686 dmap = self._map
686 dmap = self._map
687 for f, e in dmap.iteritems():
687 for f, e in dmap.iteritems():
688 if e[0] == 'n' and e[3] == now:
688 if e[0] == 'n' and e[3] == now:
689 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
689 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
690
690
691 # emulate that all 'dirstate.normal' results are written out
691 # emulate that all 'dirstate.normal' results are written out
692 self._lastnormaltime = 0
692 self._lastnormaltime = 0
693
693
694 # delay writing in-memory changes out
694 # delay writing in-memory changes out
695 tr.addfilegenerator('dirstate', (self._filename,),
695 tr.addfilegenerator('dirstate', (self._filename,),
696 self._writedirstate, location='plain')
696 self._writedirstate, location='plain')
697 return
697 return
698
698
699 st = self._opener(filename, "w", atomictemp=True)
699 st = self._opener(filename, "w", atomictemp=True)
700 self._writedirstate(st)
700 self._writedirstate(st)
701
701
702 def _writedirstate(self, st):
702 def _writedirstate(self, st):
703 # use the modification time of the newly created temporary file as the
703 # use the modification time of the newly created temporary file as the
704 # filesystem's notion of 'now'
704 # filesystem's notion of 'now'
705 now = util.fstat(st).st_mtime & _rangemask
705 now = util.fstat(st).st_mtime & _rangemask
706
706
707 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
707 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
708 # the timestamp of each entry in dirstate, because of 'now > mtime'
708 # the timestamp of each entry in dirstate, because of 'now > mtime'
709 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
709 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
710 if delaywrite > 0:
710 if delaywrite > 0:
711 # do we have any files to delay for?
712 for f, e in self._map.iteritems():
713 if e[0] == 'n' and e[3] == now:
711 import time # to avoid useless import
714 import time # to avoid useless import
712 time.sleep(delaywrite)
715 time.sleep(delaywrite)
716 break
713
717
714 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
718 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
715 st.close()
719 st.close()
716 self._lastnormaltime = 0
720 self._lastnormaltime = 0
717 self._dirty = self._dirtypl = False
721 self._dirty = self._dirtypl = False
718
722
719 def _dirignore(self, f):
723 def _dirignore(self, f):
720 if f == '.':
724 if f == '.':
721 return False
725 return False
722 if self._ignore(f):
726 if self._ignore(f):
723 return True
727 return True
724 for p in util.finddirs(f):
728 for p in util.finddirs(f):
725 if self._ignore(p):
729 if self._ignore(p):
726 return True
730 return True
727 return False
731 return False
728
732
729 def _walkexplicit(self, match, subrepos):
733 def _walkexplicit(self, match, subrepos):
730 '''Get stat data about the files explicitly specified by match.
734 '''Get stat data about the files explicitly specified by match.
731
735
732 Return a triple (results, dirsfound, dirsnotfound).
736 Return a triple (results, dirsfound, dirsnotfound).
733 - results is a mapping from filename to stat result. It also contains
737 - results is a mapping from filename to stat result. It also contains
734 listings mapping subrepos and .hg to None.
738 listings mapping subrepos and .hg to None.
735 - dirsfound is a list of files found to be directories.
739 - dirsfound is a list of files found to be directories.
736 - dirsnotfound is a list of files that the dirstate thinks are
740 - dirsnotfound is a list of files that the dirstate thinks are
737 directories and that were not found.'''
741 directories and that were not found.'''
738
742
739 def badtype(mode):
743 def badtype(mode):
740 kind = _('unknown')
744 kind = _('unknown')
741 if stat.S_ISCHR(mode):
745 if stat.S_ISCHR(mode):
742 kind = _('character device')
746 kind = _('character device')
743 elif stat.S_ISBLK(mode):
747 elif stat.S_ISBLK(mode):
744 kind = _('block device')
748 kind = _('block device')
745 elif stat.S_ISFIFO(mode):
749 elif stat.S_ISFIFO(mode):
746 kind = _('fifo')
750 kind = _('fifo')
747 elif stat.S_ISSOCK(mode):
751 elif stat.S_ISSOCK(mode):
748 kind = _('socket')
752 kind = _('socket')
749 elif stat.S_ISDIR(mode):
753 elif stat.S_ISDIR(mode):
750 kind = _('directory')
754 kind = _('directory')
751 return _('unsupported file type (type is %s)') % kind
755 return _('unsupported file type (type is %s)') % kind
752
756
753 matchedir = match.explicitdir
757 matchedir = match.explicitdir
754 badfn = match.bad
758 badfn = match.bad
755 dmap = self._map
759 dmap = self._map
756 lstat = os.lstat
760 lstat = os.lstat
757 getkind = stat.S_IFMT
761 getkind = stat.S_IFMT
758 dirkind = stat.S_IFDIR
762 dirkind = stat.S_IFDIR
759 regkind = stat.S_IFREG
763 regkind = stat.S_IFREG
760 lnkkind = stat.S_IFLNK
764 lnkkind = stat.S_IFLNK
761 join = self._join
765 join = self._join
762 dirsfound = []
766 dirsfound = []
763 foundadd = dirsfound.append
767 foundadd = dirsfound.append
764 dirsnotfound = []
768 dirsnotfound = []
765 notfoundadd = dirsnotfound.append
769 notfoundadd = dirsnotfound.append
766
770
767 if not match.isexact() and self._checkcase:
771 if not match.isexact() and self._checkcase:
768 normalize = self._normalize
772 normalize = self._normalize
769 else:
773 else:
770 normalize = None
774 normalize = None
771
775
772 files = sorted(match.files())
776 files = sorted(match.files())
773 subrepos.sort()
777 subrepos.sort()
774 i, j = 0, 0
778 i, j = 0, 0
775 while i < len(files) and j < len(subrepos):
779 while i < len(files) and j < len(subrepos):
776 subpath = subrepos[j] + "/"
780 subpath = subrepos[j] + "/"
777 if files[i] < subpath:
781 if files[i] < subpath:
778 i += 1
782 i += 1
779 continue
783 continue
780 while i < len(files) and files[i].startswith(subpath):
784 while i < len(files) and files[i].startswith(subpath):
781 del files[i]
785 del files[i]
782 j += 1
786 j += 1
783
787
784 if not files or '.' in files:
788 if not files or '.' in files:
785 files = ['.']
789 files = ['.']
786 results = dict.fromkeys(subrepos)
790 results = dict.fromkeys(subrepos)
787 results['.hg'] = None
791 results['.hg'] = None
788
792
789 alldirs = None
793 alldirs = None
790 for ff in files:
794 for ff in files:
791 # constructing the foldmap is expensive, so don't do it for the
795 # constructing the foldmap is expensive, so don't do it for the
792 # common case where files is ['.']
796 # common case where files is ['.']
793 if normalize and ff != '.':
797 if normalize and ff != '.':
794 nf = normalize(ff, False, True)
798 nf = normalize(ff, False, True)
795 else:
799 else:
796 nf = ff
800 nf = ff
797 if nf in results:
801 if nf in results:
798 continue
802 continue
799
803
800 try:
804 try:
801 st = lstat(join(nf))
805 st = lstat(join(nf))
802 kind = getkind(st.st_mode)
806 kind = getkind(st.st_mode)
803 if kind == dirkind:
807 if kind == dirkind:
804 if nf in dmap:
808 if nf in dmap:
805 # file replaced by dir on disk but still in dirstate
809 # file replaced by dir on disk but still in dirstate
806 results[nf] = None
810 results[nf] = None
807 if matchedir:
811 if matchedir:
808 matchedir(nf)
812 matchedir(nf)
809 foundadd((nf, ff))
813 foundadd((nf, ff))
810 elif kind == regkind or kind == lnkkind:
814 elif kind == regkind or kind == lnkkind:
811 results[nf] = st
815 results[nf] = st
812 else:
816 else:
813 badfn(ff, badtype(kind))
817 badfn(ff, badtype(kind))
814 if nf in dmap:
818 if nf in dmap:
815 results[nf] = None
819 results[nf] = None
816 except OSError as inst: # nf not found on disk - it is dirstate only
820 except OSError as inst: # nf not found on disk - it is dirstate only
817 if nf in dmap: # does it exactly match a missing file?
821 if nf in dmap: # does it exactly match a missing file?
818 results[nf] = None
822 results[nf] = None
819 else: # does it match a missing directory?
823 else: # does it match a missing directory?
820 if alldirs is None:
824 if alldirs is None:
821 alldirs = util.dirs(dmap)
825 alldirs = util.dirs(dmap)
822 if nf in alldirs:
826 if nf in alldirs:
823 if matchedir:
827 if matchedir:
824 matchedir(nf)
828 matchedir(nf)
825 notfoundadd(nf)
829 notfoundadd(nf)
826 else:
830 else:
827 badfn(ff, inst.strerror)
831 badfn(ff, inst.strerror)
828
832
829 # Case insensitive filesystems cannot rely on lstat() failing to detect
833 # Case insensitive filesystems cannot rely on lstat() failing to detect
830 # a case-only rename. Prune the stat object for any file that does not
834 # a case-only rename. Prune the stat object for any file that does not
831 # match the case in the filesystem, if there are multiple files that
835 # match the case in the filesystem, if there are multiple files that
832 # normalize to the same path.
836 # normalize to the same path.
833 if match.isexact() and self._checkcase:
837 if match.isexact() and self._checkcase:
834 normed = {}
838 normed = {}
835
839
836 for f, st in results.iteritems():
840 for f, st in results.iteritems():
837 if st is None:
841 if st is None:
838 continue
842 continue
839
843
840 nc = util.normcase(f)
844 nc = util.normcase(f)
841 paths = normed.get(nc)
845 paths = normed.get(nc)
842
846
843 if paths is None:
847 if paths is None:
844 paths = set()
848 paths = set()
845 normed[nc] = paths
849 normed[nc] = paths
846
850
847 paths.add(f)
851 paths.add(f)
848
852
849 for norm, paths in normed.iteritems():
853 for norm, paths in normed.iteritems():
850 if len(paths) > 1:
854 if len(paths) > 1:
851 for path in paths:
855 for path in paths:
852 folded = self._discoverpath(path, norm, True, None,
856 folded = self._discoverpath(path, norm, True, None,
853 self._dirfoldmap)
857 self._dirfoldmap)
854 if path != folded:
858 if path != folded:
855 results[path] = None
859 results[path] = None
856
860
857 return results, dirsfound, dirsnotfound
861 return results, dirsfound, dirsnotfound
858
862
859 def walk(self, match, subrepos, unknown, ignored, full=True):
863 def walk(self, match, subrepos, unknown, ignored, full=True):
860 '''
864 '''
861 Walk recursively through the directory tree, finding all files
865 Walk recursively through the directory tree, finding all files
862 matched by match.
866 matched by match.
863
867
864 If full is False, maybe skip some known-clean files.
868 If full is False, maybe skip some known-clean files.
865
869
866 Return a dict mapping filename to stat-like object (either
870 Return a dict mapping filename to stat-like object (either
867 mercurial.osutil.stat instance or return value of os.stat()).
871 mercurial.osutil.stat instance or return value of os.stat()).
868
872
869 '''
873 '''
870 # full is a flag that extensions that hook into walk can use -- this
874 # full is a flag that extensions that hook into walk can use -- this
871 # implementation doesn't use it at all. This satisfies the contract
875 # implementation doesn't use it at all. This satisfies the contract
872 # because we only guarantee a "maybe".
876 # because we only guarantee a "maybe".
873
877
874 if ignored:
878 if ignored:
875 ignore = util.never
879 ignore = util.never
876 dirignore = util.never
880 dirignore = util.never
877 elif unknown:
881 elif unknown:
878 ignore = self._ignore
882 ignore = self._ignore
879 dirignore = self._dirignore
883 dirignore = self._dirignore
880 else:
884 else:
881 # if not unknown and not ignored, drop dir recursion and step 2
885 # if not unknown and not ignored, drop dir recursion and step 2
882 ignore = util.always
886 ignore = util.always
883 dirignore = util.always
887 dirignore = util.always
884
888
885 matchfn = match.matchfn
889 matchfn = match.matchfn
886 matchalways = match.always()
890 matchalways = match.always()
887 matchtdir = match.traversedir
891 matchtdir = match.traversedir
888 dmap = self._map
892 dmap = self._map
889 listdir = osutil.listdir
893 listdir = osutil.listdir
890 lstat = os.lstat
894 lstat = os.lstat
891 dirkind = stat.S_IFDIR
895 dirkind = stat.S_IFDIR
892 regkind = stat.S_IFREG
896 regkind = stat.S_IFREG
893 lnkkind = stat.S_IFLNK
897 lnkkind = stat.S_IFLNK
894 join = self._join
898 join = self._join
895
899
896 exact = skipstep3 = False
900 exact = skipstep3 = False
897 if match.isexact(): # match.exact
901 if match.isexact(): # match.exact
898 exact = True
902 exact = True
899 dirignore = util.always # skip step 2
903 dirignore = util.always # skip step 2
900 elif match.prefix(): # match.match, no patterns
904 elif match.prefix(): # match.match, no patterns
901 skipstep3 = True
905 skipstep3 = True
902
906
903 if not exact and self._checkcase:
907 if not exact and self._checkcase:
904 normalize = self._normalize
908 normalize = self._normalize
905 normalizefile = self._normalizefile
909 normalizefile = self._normalizefile
906 skipstep3 = False
910 skipstep3 = False
907 else:
911 else:
908 normalize = self._normalize
912 normalize = self._normalize
909 normalizefile = None
913 normalizefile = None
910
914
911 # step 1: find all explicit files
915 # step 1: find all explicit files
912 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
916 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
913
917
914 skipstep3 = skipstep3 and not (work or dirsnotfound)
918 skipstep3 = skipstep3 and not (work or dirsnotfound)
915 work = [d for d in work if not dirignore(d[0])]
919 work = [d for d in work if not dirignore(d[0])]
916
920
917 # step 2: visit subdirectories
921 # step 2: visit subdirectories
918 def traverse(work, alreadynormed):
922 def traverse(work, alreadynormed):
919 wadd = work.append
923 wadd = work.append
920 while work:
924 while work:
921 nd = work.pop()
925 nd = work.pop()
922 skip = None
926 skip = None
923 if nd == '.':
927 if nd == '.':
924 nd = ''
928 nd = ''
925 else:
929 else:
926 skip = '.hg'
930 skip = '.hg'
927 try:
931 try:
928 entries = listdir(join(nd), stat=True, skip=skip)
932 entries = listdir(join(nd), stat=True, skip=skip)
929 except OSError as inst:
933 except OSError as inst:
930 if inst.errno in (errno.EACCES, errno.ENOENT):
934 if inst.errno in (errno.EACCES, errno.ENOENT):
931 match.bad(self.pathto(nd), inst.strerror)
935 match.bad(self.pathto(nd), inst.strerror)
932 continue
936 continue
933 raise
937 raise
934 for f, kind, st in entries:
938 for f, kind, st in entries:
935 if normalizefile:
939 if normalizefile:
936 # even though f might be a directory, we're only
940 # even though f might be a directory, we're only
937 # interested in comparing it to files currently in the
941 # interested in comparing it to files currently in the
938 # dmap -- therefore normalizefile is enough
942 # dmap -- therefore normalizefile is enough
939 nf = normalizefile(nd and (nd + "/" + f) or f, True,
943 nf = normalizefile(nd and (nd + "/" + f) or f, True,
940 True)
944 True)
941 else:
945 else:
942 nf = nd and (nd + "/" + f) or f
946 nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that traverse() doesn't have to do any
            # expensive directory normalization
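            # (for example, if the caller asked for 'FOO/bar' but the tracked
            # form is 'foo/bar', nd != d and entries discovered below it still
            # need normalizing -- illustrative case only)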
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn, b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = dmap.keys()
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was
                # stat'ed and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory,
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                nf = iter(visit).next
                for st in util.statfiles([join(i) for i in visit]):
                    results[nf()] = st
        return results
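        # A rough sketch of the returned mapping (assumed shape, for
        # illustration only):
        #   {'sub/seen.txt': <os.stat_result>,   # found on disk
        #    'sub/missing.txt': None}            # tracked/matched but absent
        # Keys are normalized, repo-relative paths.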

    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of
        type scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
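        # Hypothetical caller sketch (names assumed, not part of this file):
        #   unsure, s = repo.dirstate.status(matchmod.always(repo.root, ''),
        #                                    [], ignored=False, clean=False,
        #                                    unknown=True)
        #   # files in 'unsure' still need a content comparison to decide
        #   # whether they belong in s.modified or s.clean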
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        ladd = lookup.append # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                    else:
                        uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but
            # not written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple
            # or a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing
            # the tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]
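            # For illustration, t might look like ('n', 0o100644, 2309,
            # 1447140681): state 'n' (normal), the recorded st_mode, and the
            # size and mtime as stored. Both size and mtime are truncated to
            # 31 bits (& _rangemask) when written, which is why the
            # comparisons below check the raw and the masked values.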

            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))

    def matches(self, match):
        '''
        return files in the dirstate (in whatever state) filtered by match
        '''
        dmap = self._map
        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files
            # is much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just
            # return that
            return list(files)
        return [f for f in dmap if match(f)]
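        # Illustrative example (hypothetical dirstate contents): with dmap
        # tracking {'a', 'b/c'} and an exact matcher built from ['a', 'x'],
        # the exact fast path above returns ['a'].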

    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def _savebackup(self, tr, suffix):
        '''Save current dirstate into backup file with suffix'''
        filename = self._actualfilename(tr)

        # use '_writedirstate' instead of 'write' to make sure changes are
        # written out unconditionally, because the latter skips writing while
        # a transaction is running. the output file is used to create the
        # backup of the dirstate at this point.
        self._writedirstate(self._opener(filename, "w", atomictemp=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that the pending file written above is unlinked on
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        self._opener.write(filename + suffix, self._opener.tryread(filename))

    def _restorebackup(self, tr, suffix):
        '''Restore dirstate from backup file with suffix'''
        # this "invalidate()" prevents "wlock.release()" from writing out
        # dirstate changes after restoring from the backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        self._opener.rename(filename + suffix, filename)

    def _clearbackup(self, tr, suffix):
        '''Clear backup file with suffix'''
        filename = self._actualfilename(tr)
        self._opener.unlink(filename + suffix)
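    # A rough sketch of how a caller might combine the three helpers above
    # (illustrative only; the suffix value is hypothetical):
    #
    #   dirstate._savebackup(tr, '.example-backup')
    #   try:
    #       ...  # operation that rewrites the working directory
    #   except Exception:
    #       dirstate._restorebackup(tr, '.example-backup')
    #       raise
    #   else:
    #       dirstate._clearbackup(tr, '.example-backup')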