##// END OF EJS Templates
dirstate: add a function to compute non-normal entries from the dmap...
Laurent Charignon -
r27588:714849ba default
parent child Browse files
Show More
@@ -1,1193 +1,1198 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import stat
12 import stat
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import nullid
15 from .node import nullid
16 from . import (
16 from . import (
17 encoding,
17 encoding,
18 error,
18 error,
19 match as matchmod,
19 match as matchmod,
20 osutil,
20 osutil,
21 parsers,
21 parsers,
22 pathutil,
22 pathutil,
23 scmutil,
23 scmutil,
24 util,
24 util,
25 )
25 )
26
26
27 propertycache = util.propertycache
27 propertycache = util.propertycache
28 filecache = scmutil.filecache
28 filecache = scmutil.filecache
29 _rangemask = 0x7fffffff
29 _rangemask = 0x7fffffff
30
30
31 dirstatetuple = parsers.dirstatetuple
31 dirstatetuple = parsers.dirstatetuple
32
32
33 class repocache(filecache):
33 class repocache(filecache):
34 """filecache for files in .hg/"""
34 """filecache for files in .hg/"""
35 def join(self, obj, fname):
35 def join(self, obj, fname):
36 return obj._opener.join(fname)
36 return obj._opener.join(fname)
37
37
38 class rootcache(filecache):
38 class rootcache(filecache):
39 """filecache for files in the repository root"""
39 """filecache for files in the repository root"""
40 def join(self, obj, fname):
40 def join(self, obj, fname):
41 return obj._join(fname)
41 return obj._join(fname)
42
42
43 def _getfsnow(vfs):
43 def _getfsnow(vfs):
44 '''Get "now" timestamp on filesystem'''
44 '''Get "now" timestamp on filesystem'''
45 tmpfd, tmpname = vfs.mkstemp()
45 tmpfd, tmpname = vfs.mkstemp()
46 try:
46 try:
47 return os.fstat(tmpfd).st_mtime
47 return os.fstat(tmpfd).st_mtime
48 finally:
48 finally:
49 os.close(tmpfd)
49 os.close(tmpfd)
50 vfs.unlink(tmpname)
50 vfs.unlink(tmpname)
51
51
52 def nonnormalentries(dmap):
53 '''Compute the nonnormal dirstate entries from the dmap'''
54 return set(fname for fname, e in dmap.iteritems()
55 if e[0] != 'n' or e[3] == -1)
56
52 def _trypending(root, vfs, filename):
57 def _trypending(root, vfs, filename):
53 '''Open file to be read according to HG_PENDING environment variable
58 '''Open file to be read according to HG_PENDING environment variable
54
59
55 This opens '.pending' of specified 'filename' only when HG_PENDING
60 This opens '.pending' of specified 'filename' only when HG_PENDING
56 is equal to 'root'.
61 is equal to 'root'.
57
62
58 This returns '(fp, is_pending_opened)' tuple.
63 This returns '(fp, is_pending_opened)' tuple.
59 '''
64 '''
60 if root == os.environ.get('HG_PENDING'):
65 if root == os.environ.get('HG_PENDING'):
61 try:
66 try:
62 return (vfs('%s.pending' % filename), True)
67 return (vfs('%s.pending' % filename), True)
63 except IOError as inst:
68 except IOError as inst:
64 if inst.errno != errno.ENOENT:
69 if inst.errno != errno.ENOENT:
65 raise
70 raise
66 return (vfs(filename), False)
71 return (vfs(filename), False)
67
72
68 class dirstate(object):
73 class dirstate(object):
69
74
70 def __init__(self, opener, ui, root, validate):
75 def __init__(self, opener, ui, root, validate):
71 '''Create a new dirstate object.
76 '''Create a new dirstate object.
72
77
73 opener is an open()-like callable that can be used to open the
78 opener is an open()-like callable that can be used to open the
74 dirstate file; root is the root of the directory tracked by
79 dirstate file; root is the root of the directory tracked by
75 the dirstate.
80 the dirstate.
76 '''
81 '''
77 self._opener = opener
82 self._opener = opener
78 self._validate = validate
83 self._validate = validate
79 self._root = root
84 self._root = root
80 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
85 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
81 # UNC path pointing to root share (issue4557)
86 # UNC path pointing to root share (issue4557)
82 self._rootdir = pathutil.normasprefix(root)
87 self._rootdir = pathutil.normasprefix(root)
83 # internal config: ui.forcecwd
88 # internal config: ui.forcecwd
84 forcecwd = ui.config('ui', 'forcecwd')
89 forcecwd = ui.config('ui', 'forcecwd')
85 if forcecwd:
90 if forcecwd:
86 self._cwd = forcecwd
91 self._cwd = forcecwd
87 self._dirty = False
92 self._dirty = False
88 self._dirtypl = False
93 self._dirtypl = False
89 self._lastnormaltime = 0
94 self._lastnormaltime = 0
90 self._ui = ui
95 self._ui = ui
91 self._filecache = {}
96 self._filecache = {}
92 self._parentwriters = 0
97 self._parentwriters = 0
93 self._filename = 'dirstate'
98 self._filename = 'dirstate'
94 self._pendingfilename = '%s.pending' % self._filename
99 self._pendingfilename = '%s.pending' % self._filename
95
100
96 # for consistent view between _pl() and _read() invocations
101 # for consistent view between _pl() and _read() invocations
97 self._pendingmode = None
102 self._pendingmode = None
98
103
99 def beginparentchange(self):
104 def beginparentchange(self):
100 '''Marks the beginning of a set of changes that involve changing
105 '''Marks the beginning of a set of changes that involve changing
101 the dirstate parents. If there is an exception during this time,
106 the dirstate parents. If there is an exception during this time,
102 the dirstate will not be written when the wlock is released. This
107 the dirstate will not be written when the wlock is released. This
103 prevents writing an incoherent dirstate where the parent doesn't
108 prevents writing an incoherent dirstate where the parent doesn't
104 match the contents.
109 match the contents.
105 '''
110 '''
106 self._parentwriters += 1
111 self._parentwriters += 1
107
112
108 def endparentchange(self):
113 def endparentchange(self):
109 '''Marks the end of a set of changes that involve changing the
114 '''Marks the end of a set of changes that involve changing the
110 dirstate parents. Once all parent changes have been marked done,
115 dirstate parents. Once all parent changes have been marked done,
111 the wlock will be free to write the dirstate on release.
116 the wlock will be free to write the dirstate on release.
112 '''
117 '''
113 if self._parentwriters > 0:
118 if self._parentwriters > 0:
114 self._parentwriters -= 1
119 self._parentwriters -= 1
115
120
116 def pendingparentchange(self):
121 def pendingparentchange(self):
117 '''Returns true if the dirstate is in the middle of a set of changes
122 '''Returns true if the dirstate is in the middle of a set of changes
118 that modify the dirstate parent.
123 that modify the dirstate parent.
119 '''
124 '''
120 return self._parentwriters > 0
125 return self._parentwriters > 0
121
126
122 @propertycache
127 @propertycache
123 def _map(self):
128 def _map(self):
124 '''Return the dirstate contents as a map from filename to
129 '''Return the dirstate contents as a map from filename to
125 (state, mode, size, time).'''
130 (state, mode, size, time).'''
126 self._read()
131 self._read()
127 return self._map
132 return self._map
128
133
129 @propertycache
134 @propertycache
130 def _copymap(self):
135 def _copymap(self):
131 self._read()
136 self._read()
132 return self._copymap
137 return self._copymap
133
138
134 @propertycache
139 @propertycache
135 def _filefoldmap(self):
140 def _filefoldmap(self):
136 try:
141 try:
137 makefilefoldmap = parsers.make_file_foldmap
142 makefilefoldmap = parsers.make_file_foldmap
138 except AttributeError:
143 except AttributeError:
139 pass
144 pass
140 else:
145 else:
141 return makefilefoldmap(self._map, util.normcasespec,
146 return makefilefoldmap(self._map, util.normcasespec,
142 util.normcasefallback)
147 util.normcasefallback)
143
148
144 f = {}
149 f = {}
145 normcase = util.normcase
150 normcase = util.normcase
146 for name, s in self._map.iteritems():
151 for name, s in self._map.iteritems():
147 if s[0] != 'r':
152 if s[0] != 'r':
148 f[normcase(name)] = name
153 f[normcase(name)] = name
149 f['.'] = '.' # prevents useless util.fspath() invocation
154 f['.'] = '.' # prevents useless util.fspath() invocation
150 return f
155 return f
151
156
152 @propertycache
157 @propertycache
153 def _dirfoldmap(self):
158 def _dirfoldmap(self):
154 f = {}
159 f = {}
155 normcase = util.normcase
160 normcase = util.normcase
156 for name in self._dirs:
161 for name in self._dirs:
157 f[normcase(name)] = name
162 f[normcase(name)] = name
158 return f
163 return f
159
164
160 @repocache('branch')
165 @repocache('branch')
161 def _branch(self):
166 def _branch(self):
162 try:
167 try:
163 return self._opener.read("branch").strip() or "default"
168 return self._opener.read("branch").strip() or "default"
164 except IOError as inst:
169 except IOError as inst:
165 if inst.errno != errno.ENOENT:
170 if inst.errno != errno.ENOENT:
166 raise
171 raise
167 return "default"
172 return "default"
168
173
169 @propertycache
174 @propertycache
170 def _pl(self):
175 def _pl(self):
171 try:
176 try:
172 fp = self._opendirstatefile()
177 fp = self._opendirstatefile()
173 st = fp.read(40)
178 st = fp.read(40)
174 fp.close()
179 fp.close()
175 l = len(st)
180 l = len(st)
176 if l == 40:
181 if l == 40:
177 return st[:20], st[20:40]
182 return st[:20], st[20:40]
178 elif l > 0 and l < 40:
183 elif l > 0 and l < 40:
179 raise error.Abort(_('working directory state appears damaged!'))
184 raise error.Abort(_('working directory state appears damaged!'))
180 except IOError as err:
185 except IOError as err:
181 if err.errno != errno.ENOENT:
186 if err.errno != errno.ENOENT:
182 raise
187 raise
183 return [nullid, nullid]
188 return [nullid, nullid]
184
189
185 @propertycache
190 @propertycache
186 def _dirs(self):
191 def _dirs(self):
187 return util.dirs(self._map, 'r')
192 return util.dirs(self._map, 'r')
188
193
189 def dirs(self):
194 def dirs(self):
190 return self._dirs
195 return self._dirs
191
196
192 @rootcache('.hgignore')
197 @rootcache('.hgignore')
193 def _ignore(self):
198 def _ignore(self):
194 files = []
199 files = []
195 if os.path.exists(self._join('.hgignore')):
200 if os.path.exists(self._join('.hgignore')):
196 files.append(self._join('.hgignore'))
201 files.append(self._join('.hgignore'))
197 for name, path in self._ui.configitems("ui"):
202 for name, path in self._ui.configitems("ui"):
198 if name == 'ignore' or name.startswith('ignore.'):
203 if name == 'ignore' or name.startswith('ignore.'):
199 # we need to use os.path.join here rather than self._join
204 # we need to use os.path.join here rather than self._join
200 # because path is arbitrary and user-specified
205 # because path is arbitrary and user-specified
201 files.append(os.path.join(self._rootdir, util.expandpath(path)))
206 files.append(os.path.join(self._rootdir, util.expandpath(path)))
202
207
203 if not files:
208 if not files:
204 return util.never
209 return util.never
205
210
206 pats = ['include:%s' % f for f in files]
211 pats = ['include:%s' % f for f in files]
207 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
212 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
208
213
209 @propertycache
214 @propertycache
210 def _slash(self):
215 def _slash(self):
211 return self._ui.configbool('ui', 'slash') and os.sep != '/'
216 return self._ui.configbool('ui', 'slash') and os.sep != '/'
212
217
213 @propertycache
218 @propertycache
214 def _checklink(self):
219 def _checklink(self):
215 return util.checklink(self._root)
220 return util.checklink(self._root)
216
221
217 @propertycache
222 @propertycache
218 def _checkexec(self):
223 def _checkexec(self):
219 return util.checkexec(self._root)
224 return util.checkexec(self._root)
220
225
221 @propertycache
226 @propertycache
222 def _checkcase(self):
227 def _checkcase(self):
223 return not util.checkcase(self._join('.hg'))
228 return not util.checkcase(self._join('.hg'))
224
229
225 def _join(self, f):
230 def _join(self, f):
226 # much faster than os.path.join()
231 # much faster than os.path.join()
227 # it's safe because f is always a relative path
232 # it's safe because f is always a relative path
228 return self._rootdir + f
233 return self._rootdir + f
229
234
230 def flagfunc(self, buildfallback):
235 def flagfunc(self, buildfallback):
231 if self._checklink and self._checkexec:
236 if self._checklink and self._checkexec:
232 def f(x):
237 def f(x):
233 try:
238 try:
234 st = os.lstat(self._join(x))
239 st = os.lstat(self._join(x))
235 if util.statislink(st):
240 if util.statislink(st):
236 return 'l'
241 return 'l'
237 if util.statisexec(st):
242 if util.statisexec(st):
238 return 'x'
243 return 'x'
239 except OSError:
244 except OSError:
240 pass
245 pass
241 return ''
246 return ''
242 return f
247 return f
243
248
244 fallback = buildfallback()
249 fallback = buildfallback()
245 if self._checklink:
250 if self._checklink:
246 def f(x):
251 def f(x):
247 if os.path.islink(self._join(x)):
252 if os.path.islink(self._join(x)):
248 return 'l'
253 return 'l'
249 if 'x' in fallback(x):
254 if 'x' in fallback(x):
250 return 'x'
255 return 'x'
251 return ''
256 return ''
252 return f
257 return f
253 if self._checkexec:
258 if self._checkexec:
254 def f(x):
259 def f(x):
255 if 'l' in fallback(x):
260 if 'l' in fallback(x):
256 return 'l'
261 return 'l'
257 if util.isexec(self._join(x)):
262 if util.isexec(self._join(x)):
258 return 'x'
263 return 'x'
259 return ''
264 return ''
260 return f
265 return f
261 else:
266 else:
262 return fallback
267 return fallback
263
268
264 @propertycache
269 @propertycache
265 def _cwd(self):
270 def _cwd(self):
266 return os.getcwd()
271 return os.getcwd()
267
272
268 def getcwd(self):
273 def getcwd(self):
269 '''Return the path from which a canonical path is calculated.
274 '''Return the path from which a canonical path is calculated.
270
275
271 This path should be used to resolve file patterns or to convert
276 This path should be used to resolve file patterns or to convert
272 canonical paths back to file paths for display. It shouldn't be
277 canonical paths back to file paths for display. It shouldn't be
273 used to get real file paths. Use vfs functions instead.
278 used to get real file paths. Use vfs functions instead.
274 '''
279 '''
275 cwd = self._cwd
280 cwd = self._cwd
276 if cwd == self._root:
281 if cwd == self._root:
277 return ''
282 return ''
278 # self._root ends with a path separator if self._root is '/' or 'C:\'
283 # self._root ends with a path separator if self._root is '/' or 'C:\'
279 rootsep = self._root
284 rootsep = self._root
280 if not util.endswithsep(rootsep):
285 if not util.endswithsep(rootsep):
281 rootsep += os.sep
286 rootsep += os.sep
282 if cwd.startswith(rootsep):
287 if cwd.startswith(rootsep):
283 return cwd[len(rootsep):]
288 return cwd[len(rootsep):]
284 else:
289 else:
285 # we're outside the repo. return an absolute path.
290 # we're outside the repo. return an absolute path.
286 return cwd
291 return cwd
287
292
288 def pathto(self, f, cwd=None):
293 def pathto(self, f, cwd=None):
289 if cwd is None:
294 if cwd is None:
290 cwd = self.getcwd()
295 cwd = self.getcwd()
291 path = util.pathto(self._root, cwd, f)
296 path = util.pathto(self._root, cwd, f)
292 if self._slash:
297 if self._slash:
293 return util.pconvert(path)
298 return util.pconvert(path)
294 return path
299 return path
295
300
296 def __getitem__(self, key):
301 def __getitem__(self, key):
297 '''Return the current state of key (a filename) in the dirstate.
302 '''Return the current state of key (a filename) in the dirstate.
298
303
299 States are:
304 States are:
300 n normal
305 n normal
301 m needs merging
306 m needs merging
302 r marked for removal
307 r marked for removal
303 a marked for addition
308 a marked for addition
304 ? not tracked
309 ? not tracked
305 '''
310 '''
306 return self._map.get(key, ("?",))[0]
311 return self._map.get(key, ("?",))[0]
307
312
308 def __contains__(self, key):
313 def __contains__(self, key):
309 return key in self._map
314 return key in self._map
310
315
311 def __iter__(self):
316 def __iter__(self):
312 for x in sorted(self._map):
317 for x in sorted(self._map):
313 yield x
318 yield x
314
319
315 def iteritems(self):
320 def iteritems(self):
316 return self._map.iteritems()
321 return self._map.iteritems()
317
322
318 def parents(self):
323 def parents(self):
319 return [self._validate(p) for p in self._pl]
324 return [self._validate(p) for p in self._pl]
320
325
321 def p1(self):
326 def p1(self):
322 return self._validate(self._pl[0])
327 return self._validate(self._pl[0])
323
328
324 def p2(self):
329 def p2(self):
325 return self._validate(self._pl[1])
330 return self._validate(self._pl[1])
326
331
327 def branch(self):
332 def branch(self):
328 return encoding.tolocal(self._branch)
333 return encoding.tolocal(self._branch)
329
334
330 def setparents(self, p1, p2=nullid):
335 def setparents(self, p1, p2=nullid):
331 """Set dirstate parents to p1 and p2.
336 """Set dirstate parents to p1 and p2.
332
337
333 When moving from two parents to one, 'm' merged entries a
338 When moving from two parents to one, 'm' merged entries a
334 adjusted to normal and previous copy records discarded and
339 adjusted to normal and previous copy records discarded and
335 returned by the call.
340 returned by the call.
336
341
337 See localrepo.setparents()
342 See localrepo.setparents()
338 """
343 """
339 if self._parentwriters == 0:
344 if self._parentwriters == 0:
340 raise ValueError("cannot set dirstate parent without "
345 raise ValueError("cannot set dirstate parent without "
341 "calling dirstate.beginparentchange")
346 "calling dirstate.beginparentchange")
342
347
343 self._dirty = self._dirtypl = True
348 self._dirty = self._dirtypl = True
344 oldp2 = self._pl[1]
349 oldp2 = self._pl[1]
345 self._pl = p1, p2
350 self._pl = p1, p2
346 copies = {}
351 copies = {}
347 if oldp2 != nullid and p2 == nullid:
352 if oldp2 != nullid and p2 == nullid:
348 for f, s in self._map.iteritems():
353 for f, s in self._map.iteritems():
349 # Discard 'm' markers when moving away from a merge state
354 # Discard 'm' markers when moving away from a merge state
350 if s[0] == 'm':
355 if s[0] == 'm':
351 if f in self._copymap:
356 if f in self._copymap:
352 copies[f] = self._copymap[f]
357 copies[f] = self._copymap[f]
353 self.normallookup(f)
358 self.normallookup(f)
354 # Also fix up otherparent markers
359 # Also fix up otherparent markers
355 elif s[0] == 'n' and s[2] == -2:
360 elif s[0] == 'n' and s[2] == -2:
356 if f in self._copymap:
361 if f in self._copymap:
357 copies[f] = self._copymap[f]
362 copies[f] = self._copymap[f]
358 self.add(f)
363 self.add(f)
359 return copies
364 return copies
360
365
361 def setbranch(self, branch):
366 def setbranch(self, branch):
362 self._branch = encoding.fromlocal(branch)
367 self._branch = encoding.fromlocal(branch)
363 f = self._opener('branch', 'w', atomictemp=True)
368 f = self._opener('branch', 'w', atomictemp=True)
364 try:
369 try:
365 f.write(self._branch + '\n')
370 f.write(self._branch + '\n')
366 f.close()
371 f.close()
367
372
368 # make sure filecache has the correct stat info for _branch after
373 # make sure filecache has the correct stat info for _branch after
369 # replacing the underlying file
374 # replacing the underlying file
370 ce = self._filecache['_branch']
375 ce = self._filecache['_branch']
371 if ce:
376 if ce:
372 ce.refresh()
377 ce.refresh()
373 except: # re-raises
378 except: # re-raises
374 f.discard()
379 f.discard()
375 raise
380 raise
376
381
377 def _opendirstatefile(self):
382 def _opendirstatefile(self):
378 fp, mode = _trypending(self._root, self._opener, self._filename)
383 fp, mode = _trypending(self._root, self._opener, self._filename)
379 if self._pendingmode is not None and self._pendingmode != mode:
384 if self._pendingmode is not None and self._pendingmode != mode:
380 fp.close()
385 fp.close()
381 raise error.Abort(_('working directory state may be '
386 raise error.Abort(_('working directory state may be '
382 'changed parallelly'))
387 'changed parallelly'))
383 self._pendingmode = mode
388 self._pendingmode = mode
384 return fp
389 return fp
385
390
386 def _read(self):
391 def _read(self):
387 self._map = {}
392 self._map = {}
388 self._copymap = {}
393 self._copymap = {}
389 try:
394 try:
390 fp = self._opendirstatefile()
395 fp = self._opendirstatefile()
391 try:
396 try:
392 st = fp.read()
397 st = fp.read()
393 finally:
398 finally:
394 fp.close()
399 fp.close()
395 except IOError as err:
400 except IOError as err:
396 if err.errno != errno.ENOENT:
401 if err.errno != errno.ENOENT:
397 raise
402 raise
398 return
403 return
399 if not st:
404 if not st:
400 return
405 return
401
406
402 if util.safehasattr(parsers, 'dict_new_presized'):
407 if util.safehasattr(parsers, 'dict_new_presized'):
403 # Make an estimate of the number of files in the dirstate based on
408 # Make an estimate of the number of files in the dirstate based on
404 # its size. From a linear regression on a set of real-world repos,
409 # its size. From a linear regression on a set of real-world repos,
405 # all over 10,000 files, the size of a dirstate entry is 85
410 # all over 10,000 files, the size of a dirstate entry is 85
406 # bytes. The cost of resizing is significantly higher than the cost
411 # bytes. The cost of resizing is significantly higher than the cost
407 # of filling in a larger presized dict, so subtract 20% from the
412 # of filling in a larger presized dict, so subtract 20% from the
408 # size.
413 # size.
409 #
414 #
410 # This heuristic is imperfect in many ways, so in a future dirstate
415 # This heuristic is imperfect in many ways, so in a future dirstate
411 # format update it makes sense to just record the number of entries
416 # format update it makes sense to just record the number of entries
412 # on write.
417 # on write.
413 self._map = parsers.dict_new_presized(len(st) / 71)
418 self._map = parsers.dict_new_presized(len(st) / 71)
414
419
415 # Python's garbage collector triggers a GC each time a certain number
420 # Python's garbage collector triggers a GC each time a certain number
416 # of container objects (the number being defined by
421 # of container objects (the number being defined by
417 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
422 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
418 # for each file in the dirstate. The C version then immediately marks
423 # for each file in the dirstate. The C version then immediately marks
419 # them as not to be tracked by the collector. However, this has no
424 # them as not to be tracked by the collector. However, this has no
420 # effect on when GCs are triggered, only on what objects the GC looks
425 # effect on when GCs are triggered, only on what objects the GC looks
421 # into. This means that O(number of files) GCs are unavoidable.
426 # into. This means that O(number of files) GCs are unavoidable.
422 # Depending on when in the process's lifetime the dirstate is parsed,
427 # Depending on when in the process's lifetime the dirstate is parsed,
423 # this can get very expensive. As a workaround, disable GC while
428 # this can get very expensive. As a workaround, disable GC while
424 # parsing the dirstate.
429 # parsing the dirstate.
425 #
430 #
426 # (we cannot decorate the function directly since it is in a C module)
431 # (we cannot decorate the function directly since it is in a C module)
427 parse_dirstate = util.nogc(parsers.parse_dirstate)
432 parse_dirstate = util.nogc(parsers.parse_dirstate)
428 p = parse_dirstate(self._map, self._copymap, st)
433 p = parse_dirstate(self._map, self._copymap, st)
429 if not self._dirtypl:
434 if not self._dirtypl:
430 self._pl = p
435 self._pl = p
431
436
432 def invalidate(self):
437 def invalidate(self):
433 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
438 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
434 "_pl", "_dirs", "_ignore"):
439 "_pl", "_dirs", "_ignore"):
435 if a in self.__dict__:
440 if a in self.__dict__:
436 delattr(self, a)
441 delattr(self, a)
437 self._lastnormaltime = 0
442 self._lastnormaltime = 0
438 self._dirty = False
443 self._dirty = False
439 self._parentwriters = 0
444 self._parentwriters = 0
440
445
441 def copy(self, source, dest):
446 def copy(self, source, dest):
442 """Mark dest as a copy of source. Unmark dest if source is None."""
447 """Mark dest as a copy of source. Unmark dest if source is None."""
443 if source == dest:
448 if source == dest:
444 return
449 return
445 self._dirty = True
450 self._dirty = True
446 if source is not None:
451 if source is not None:
447 self._copymap[dest] = source
452 self._copymap[dest] = source
448 elif dest in self._copymap:
453 elif dest in self._copymap:
449 del self._copymap[dest]
454 del self._copymap[dest]
450
455
451 def copied(self, file):
456 def copied(self, file):
452 return self._copymap.get(file, None)
457 return self._copymap.get(file, None)
453
458
454 def copies(self):
459 def copies(self):
455 return self._copymap
460 return self._copymap
456
461
457 def _droppath(self, f):
462 def _droppath(self, f):
458 if self[f] not in "?r" and "_dirs" in self.__dict__:
463 if self[f] not in "?r" and "_dirs" in self.__dict__:
459 self._dirs.delpath(f)
464 self._dirs.delpath(f)
460
465
461 if "_filefoldmap" in self.__dict__:
466 if "_filefoldmap" in self.__dict__:
462 normed = util.normcase(f)
467 normed = util.normcase(f)
463 if normed in self._filefoldmap:
468 if normed in self._filefoldmap:
464 del self._filefoldmap[normed]
469 del self._filefoldmap[normed]
465
470
466 def _addpath(self, f, state, mode, size, mtime):
471 def _addpath(self, f, state, mode, size, mtime):
467 oldstate = self[f]
472 oldstate = self[f]
468 if state == 'a' or oldstate == 'r':
473 if state == 'a' or oldstate == 'r':
469 scmutil.checkfilename(f)
474 scmutil.checkfilename(f)
470 if f in self._dirs:
475 if f in self._dirs:
471 raise error.Abort(_('directory %r already in dirstate') % f)
476 raise error.Abort(_('directory %r already in dirstate') % f)
472 # shadows
477 # shadows
473 for d in util.finddirs(f):
478 for d in util.finddirs(f):
474 if d in self._dirs:
479 if d in self._dirs:
475 break
480 break
476 if d in self._map and self[d] != 'r':
481 if d in self._map and self[d] != 'r':
477 raise error.Abort(
482 raise error.Abort(
478 _('file %r in dirstate clashes with %r') % (d, f))
483 _('file %r in dirstate clashes with %r') % (d, f))
479 if oldstate in "?r" and "_dirs" in self.__dict__:
484 if oldstate in "?r" and "_dirs" in self.__dict__:
480 self._dirs.addpath(f)
485 self._dirs.addpath(f)
481 self._dirty = True
486 self._dirty = True
482 self._map[f] = dirstatetuple(state, mode, size, mtime)
487 self._map[f] = dirstatetuple(state, mode, size, mtime)
483
488
484 def normal(self, f):
489 def normal(self, f):
485 '''Mark a file normal and clean.'''
490 '''Mark a file normal and clean.'''
486 s = os.lstat(self._join(f))
491 s = os.lstat(self._join(f))
487 mtime = s.st_mtime
492 mtime = s.st_mtime
488 self._addpath(f, 'n', s.st_mode,
493 self._addpath(f, 'n', s.st_mode,
489 s.st_size & _rangemask, mtime & _rangemask)
494 s.st_size & _rangemask, mtime & _rangemask)
490 if f in self._copymap:
495 if f in self._copymap:
491 del self._copymap[f]
496 del self._copymap[f]
492 if mtime > self._lastnormaltime:
497 if mtime > self._lastnormaltime:
493 # Remember the most recent modification timeslot for status(),
498 # Remember the most recent modification timeslot for status(),
494 # to make sure we won't miss future size-preserving file content
499 # to make sure we won't miss future size-preserving file content
495 # modifications that happen within the same timeslot.
500 # modifications that happen within the same timeslot.
496 self._lastnormaltime = mtime
501 self._lastnormaltime = mtime
497
502
498 def normallookup(self, f):
503 def normallookup(self, f):
499 '''Mark a file normal, but possibly dirty.'''
504 '''Mark a file normal, but possibly dirty.'''
500 if self._pl[1] != nullid and f in self._map:
505 if self._pl[1] != nullid and f in self._map:
501 # if there is a merge going on and the file was either
506 # if there is a merge going on and the file was either
502 # in state 'm' (-1) or coming from other parent (-2) before
507 # in state 'm' (-1) or coming from other parent (-2) before
503 # being removed, restore that state.
508 # being removed, restore that state.
504 entry = self._map[f]
509 entry = self._map[f]
505 if entry[0] == 'r' and entry[2] in (-1, -2):
510 if entry[0] == 'r' and entry[2] in (-1, -2):
506 source = self._copymap.get(f)
511 source = self._copymap.get(f)
507 if entry[2] == -1:
512 if entry[2] == -1:
508 self.merge(f)
513 self.merge(f)
509 elif entry[2] == -2:
514 elif entry[2] == -2:
510 self.otherparent(f)
515 self.otherparent(f)
511 if source:
516 if source:
512 self.copy(source, f)
517 self.copy(source, f)
513 return
518 return
514 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
519 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
515 return
520 return
516 self._addpath(f, 'n', 0, -1, -1)
521 self._addpath(f, 'n', 0, -1, -1)
517 if f in self._copymap:
522 if f in self._copymap:
518 del self._copymap[f]
523 del self._copymap[f]
519
524
520 def otherparent(self, f):
525 def otherparent(self, f):
521 '''Mark as coming from the other parent, always dirty.'''
526 '''Mark as coming from the other parent, always dirty.'''
522 if self._pl[1] == nullid:
527 if self._pl[1] == nullid:
523 raise error.Abort(_("setting %r to other parent "
528 raise error.Abort(_("setting %r to other parent "
524 "only allowed in merges") % f)
529 "only allowed in merges") % f)
525 if f in self and self[f] == 'n':
530 if f in self and self[f] == 'n':
526 # merge-like
531 # merge-like
527 self._addpath(f, 'm', 0, -2, -1)
532 self._addpath(f, 'm', 0, -2, -1)
528 else:
533 else:
529 # add-like
534 # add-like
530 self._addpath(f, 'n', 0, -2, -1)
535 self._addpath(f, 'n', 0, -2, -1)
531
536
532 if f in self._copymap:
537 if f in self._copymap:
533 del self._copymap[f]
538 del self._copymap[f]
534
539
535 def add(self, f):
540 def add(self, f):
536 '''Mark a file added.'''
541 '''Mark a file added.'''
537 self._addpath(f, 'a', 0, -1, -1)
542 self._addpath(f, 'a', 0, -1, -1)
538 if f in self._copymap:
543 if f in self._copymap:
539 del self._copymap[f]
544 del self._copymap[f]
540
545
541 def remove(self, f):
546 def remove(self, f):
542 '''Mark a file removed.'''
547 '''Mark a file removed.'''
543 self._dirty = True
548 self._dirty = True
544 self._droppath(f)
549 self._droppath(f)
545 size = 0
550 size = 0
546 if self._pl[1] != nullid and f in self._map:
551 if self._pl[1] != nullid and f in self._map:
547 # backup the previous state
552 # backup the previous state
548 entry = self._map[f]
553 entry = self._map[f]
549 if entry[0] == 'm': # merge
554 if entry[0] == 'm': # merge
550 size = -1
555 size = -1
551 elif entry[0] == 'n' and entry[2] == -2: # other parent
556 elif entry[0] == 'n' and entry[2] == -2: # other parent
552 size = -2
557 size = -2
553 self._map[f] = dirstatetuple('r', 0, size, 0)
558 self._map[f] = dirstatetuple('r', 0, size, 0)
554 if size == 0 and f in self._copymap:
559 if size == 0 and f in self._copymap:
555 del self._copymap[f]
560 del self._copymap[f]
556
561
557 def merge(self, f):
562 def merge(self, f):
558 '''Mark a file merged.'''
563 '''Mark a file merged.'''
559 if self._pl[1] == nullid:
564 if self._pl[1] == nullid:
560 return self.normallookup(f)
565 return self.normallookup(f)
561 return self.otherparent(f)
566 return self.otherparent(f)
562
567
563 def drop(self, f):
568 def drop(self, f):
564 '''Drop a file from the dirstate'''
569 '''Drop a file from the dirstate'''
565 if f in self._map:
570 if f in self._map:
566 self._dirty = True
571 self._dirty = True
567 self._droppath(f)
572 self._droppath(f)
568 del self._map[f]
573 del self._map[f]
569
574
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Resolve the on-disk case of *path* and cache it in *storemap*.

        *normed* is the case-normalized form of *path*; *exists* is an
        optional pre-computed lexists() result (None means "not checked
        yet"). The folded (disk-cased) path is stored under *normed* in
        *storemap* and returned.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and '/' in path:
                d, f = path.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + "/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if '/' in normed:
                d, f = normed.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + "/" + d
                # util.fspath resolves the last component's case on disk
                folded = d + "/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that actually exist on disk
            storemap[normed] = folded

        return folded
596 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
601 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
597 normed = util.normcase(path)
602 normed = util.normcase(path)
598 folded = self._filefoldmap.get(normed, None)
603 folded = self._filefoldmap.get(normed, None)
599 if folded is None:
604 if folded is None:
600 if isknown:
605 if isknown:
601 folded = path
606 folded = path
602 else:
607 else:
603 folded = self._discoverpath(path, normed, ignoremissing, exists,
608 folded = self._discoverpath(path, normed, ignoremissing, exists,
604 self._filefoldmap)
609 self._filefoldmap)
605 return folded
610 return folded
606
611
607 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
612 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
608 normed = util.normcase(path)
613 normed = util.normcase(path)
609 folded = self._filefoldmap.get(normed, None)
614 folded = self._filefoldmap.get(normed, None)
610 if folded is None:
615 if folded is None:
611 folded = self._dirfoldmap.get(normed, None)
616 folded = self._dirfoldmap.get(normed, None)
612 if folded is None:
617 if folded is None:
613 if isknown:
618 if isknown:
614 folded = path
619 folded = path
615 else:
620 else:
616 # store discovered result in dirfoldmap so that future
621 # store discovered result in dirfoldmap so that future
617 # normalizefile calls don't start matching directories
622 # normalizefile calls don't start matching directories
618 folded = self._discoverpath(path, normed, ignoremissing, exists,
623 folded = self._discoverpath(path, normed, ignoremissing, exists,
619 self._dirfoldmap)
624 self._dirfoldmap)
620 return folded
625 return folded
621
626
622 def normalize(self, path, isknown=False, ignoremissing=False):
627 def normalize(self, path, isknown=False, ignoremissing=False):
623 '''
628 '''
624 normalize the case of a pathname when on a casefolding filesystem
629 normalize the case of a pathname when on a casefolding filesystem
625
630
626 isknown specifies whether the filename came from walking the
631 isknown specifies whether the filename came from walking the
627 disk, to avoid extra filesystem access.
632 disk, to avoid extra filesystem access.
628
633
629 If ignoremissing is True, missing path are returned
634 If ignoremissing is True, missing path are returned
630 unchanged. Otherwise, we try harder to normalize possibly
635 unchanged. Otherwise, we try harder to normalize possibly
631 existing path components.
636 existing path components.
632
637
633 The normalized case is determined based on the following precedence:
638 The normalized case is determined based on the following precedence:
634
639
635 - version of name already stored in the dirstate
640 - version of name already stored in the dirstate
636 - version of name stored on disk
641 - version of name stored on disk
637 - version provided via command arguments
642 - version provided via command arguments
638 '''
643 '''
639
644
640 if self._checkcase:
645 if self._checkcase:
641 return self._normalize(path, isknown, ignoremissing)
646 return self._normalize(path, isknown, ignoremissing)
642 return path
647 return path
643
648
644 def clear(self):
649 def clear(self):
645 self._map = {}
650 self._map = {}
646 if "_dirs" in self.__dict__:
651 if "_dirs" in self.__dict__:
647 delattr(self, "_dirs")
652 delattr(self, "_dirs")
648 self._copymap = {}
653 self._copymap = {}
649 self._pl = [nullid, nullid]
654 self._pl = [nullid, nullid]
650 self._lastnormaltime = 0
655 self._lastnormaltime = 0
651 self._dirty = True
656 self._dirty = True
652
657
653 def rebuild(self, parent, allfiles, changedfiles=None):
658 def rebuild(self, parent, allfiles, changedfiles=None):
654 if changedfiles is None:
659 if changedfiles is None:
655 # Rebuild entire dirstate
660 # Rebuild entire dirstate
656 changedfiles = allfiles
661 changedfiles = allfiles
657 lastnormaltime = self._lastnormaltime
662 lastnormaltime = self._lastnormaltime
658 self.clear()
663 self.clear()
659 self._lastnormaltime = lastnormaltime
664 self._lastnormaltime = lastnormaltime
660
665
661 for f in changedfiles:
666 for f in changedfiles:
662 mode = 0o666
667 mode = 0o666
663 if f in allfiles and 'x' in allfiles.flags(f):
668 if f in allfiles and 'x' in allfiles.flags(f):
664 mode = 0o777
669 mode = 0o777
665
670
666 if f in allfiles:
671 if f in allfiles:
667 self._map[f] = dirstatetuple('n', mode, -1, 0)
672 self._map[f] = dirstatetuple('n', mode, -1, 0)
668 else:
673 else:
669 self._map.pop(f, None)
674 self._map.pop(f, None)
670
675
671 self._pl = (parent, nullid)
676 self._pl = (parent, nullid)
672 self._dirty = True
677 self._dirty = True
673
678
    def write(self, tr=False):
        """Write the dirstate to disk, or schedule the write on *tr*.

        tr is the current transaction (or None for no transaction); the
        default False means "not explicitly specified" and triggers a
        devel warning, since callers should pass
        repo.currenttransaction().
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr is False: # not explicitly specified
            if (self._ui.configbool('devel', 'all-warnings')
                or self._ui.configbool('devel', 'check-dirstate-write')):
                self._ui.develwarn('use dirstate.write with '
                                   'repo.currenttransaction()')

            if self._opener.lexists(self._pendingfilename):
                # if pending file already exists, in-memory changes
                # should be written into it, because it has priority
                # to '.hg/dirstate' at reading under HG_PENDING mode
                filename = self._pendingfilename
        elif tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            dmap = self._map
            for f, e in dmap.iteritems():
                # mtime equal to "now" is ambiguous; -1 forces a re-stat later
                if e[0] == 'n' and e[3] == now:
                    dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0

            # delay writing in-memory changes out
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')
            return

        # no transaction: write synchronously and atomically
        st = self._opener(filename, "w", atomictemp=True)
        self._writedirstate(st)
    def _writedirstate(self, st):
        """Serialize the dirstate map into the open file object *st*."""
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st).st_mtime & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in self._map.iteritems():
                if e[0] == 'n' and e[3] == now:
                    import time # to avoid useless import
                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    # one ambiguous entry is enough; the sleep covers them all
                    break

        st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
        st.close()
        self._lastnormaltime = 0
        self._dirty = self._dirtypl = False
741 def _dirignore(self, f):
746 def _dirignore(self, f):
742 if f == '.':
747 if f == '.':
743 return False
748 return False
744 if self._ignore(f):
749 if self._ignore(f):
745 return True
750 return True
746 for p in util.finddirs(f):
751 for p in util.finddirs(f):
747 if self._ignore(p):
752 if self._ignore(p):
748 return True
753 return True
749 return False
754 return False
750
755
    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            # human-readable description for a stat mode we cannot track
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        # hoist attribute lookups out of the per-file loop below
        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo; the subrepo
        # itself is reported via results instead (both lists are sorted,
        # so this is a single merge-style pass)
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or '.' in files:
            files = ['.']
        results = dict.fromkeys(subrepos)
        results['.hg'] = None

        alldirs = None
        for ff in files:
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['.']
            if normalize and ff != '.':
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    # keep both the normalized and original spelling; the
                    # caller decides whether re-normalization is needed
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if alldirs is None:
                        # lazily computed: only needed when something is missing
                        alldirs = util.dirs(dmap)
                    if nf in alldirs:
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, inst.strerror)

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group stat'ed results by their case-normalized form
            for f, st in results.iteritems():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.iteritems():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(path, norm, True, None,
                                                    self._dirfoldmap)
                        if path != folded:
                            # wrong-cased duplicate: treat as nonexistent
                            results[path] = None

        return results, dirsfound, dirsnotfound
    def walk(self, match, subrepos, unknown, ignored, full=True):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        '''
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # hoist attribute lookups for the traversal loops below
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = osutil.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        # work entries are (normalized, original) pairs from _walkexplicit
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                nd = work.pop()
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    # never descend into the .hg directory
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd), inst.strerror)
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # tracked file replaced by something we can't
                            # stat (e.g. a special file): report as missing
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = dmap.keys()
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                nf = iter(visit).next
                for st in util.statfiles([join(i) for i in visit]):
                    results[nf()] = st
        return results
1043 def status(self, match, subrepos, ignored, clean, unknown):
1048 def status(self, match, subrepos, ignored, clean, unknown):
1044 '''Determine the status of the working copy relative to the
1049 '''Determine the status of the working copy relative to the
1045 dirstate and return a pair of (unsure, status), where status is of type
1050 dirstate and return a pair of (unsure, status), where status is of type
1046 scmutil.status and:
1051 scmutil.status and:
1047
1052
1048 unsure:
1053 unsure:
1049 files that might have been modified since the dirstate was
1054 files that might have been modified since the dirstate was
1050 written, but need to be read to be sure (size is the same
1055 written, but need to be read to be sure (size is the same
1051 but mtime differs)
1056 but mtime differs)
1052 status.modified:
1057 status.modified:
1053 files that have definitely been modified since the dirstate
1058 files that have definitely been modified since the dirstate
1054 was written (different size or mode)
1059 was written (different size or mode)
1055 status.clean:
1060 status.clean:
1056 files that have definitely not been modified since the
1061 files that have definitely not been modified since the
1057 dirstate was written
1062 dirstate was written
1058 '''
1063 '''
1059 listignored, listclean, listunknown = ignored, clean, unknown
1064 listignored, listclean, listunknown = ignored, clean, unknown
1060 lookup, modified, added, unknown, ignored = [], [], [], [], []
1065 lookup, modified, added, unknown, ignored = [], [], [], [], []
1061 removed, deleted, clean = [], [], []
1066 removed, deleted, clean = [], [], []
1062
1067
1063 dmap = self._map
1068 dmap = self._map
1064 ladd = lookup.append # aka "unsure"
1069 ladd = lookup.append # aka "unsure"
1065 madd = modified.append
1070 madd = modified.append
1066 aadd = added.append
1071 aadd = added.append
1067 uadd = unknown.append
1072 uadd = unknown.append
1068 iadd = ignored.append
1073 iadd = ignored.append
1069 radd = removed.append
1074 radd = removed.append
1070 dadd = deleted.append
1075 dadd = deleted.append
1071 cadd = clean.append
1076 cadd = clean.append
1072 mexact = match.exact
1077 mexact = match.exact
1073 dirignore = self._dirignore
1078 dirignore = self._dirignore
1074 checkexec = self._checkexec
1079 checkexec = self._checkexec
1075 copymap = self._copymap
1080 copymap = self._copymap
1076 lastnormaltime = self._lastnormaltime
1081 lastnormaltime = self._lastnormaltime
1077
1082
1078 # We need to do full walks when either
1083 # We need to do full walks when either
1079 # - we're listing all clean files, or
1084 # - we're listing all clean files, or
1080 # - match.traversedir does something, because match.traversedir should
1085 # - match.traversedir does something, because match.traversedir should
1081 # be called for every dir in the working dir
1086 # be called for every dir in the working dir
1082 full = listclean or match.traversedir is not None
1087 full = listclean or match.traversedir is not None
1083 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1088 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1084 full=full).iteritems():
1089 full=full).iteritems():
1085 if fn not in dmap:
1090 if fn not in dmap:
1086 if (listignored or mexact(fn)) and dirignore(fn):
1091 if (listignored or mexact(fn)) and dirignore(fn):
1087 if listignored:
1092 if listignored:
1088 iadd(fn)
1093 iadd(fn)
1089 else:
1094 else:
1090 uadd(fn)
1095 uadd(fn)
1091 continue
1096 continue
1092
1097
1093 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1098 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1094 # written like that for performance reasons. dmap[fn] is not a
1099 # written like that for performance reasons. dmap[fn] is not a
1095 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1100 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1096 # opcode has fast paths when the value to be unpacked is a tuple or
1101 # opcode has fast paths when the value to be unpacked is a tuple or
1097 # a list, but falls back to creating a full-fledged iterator in
1102 # a list, but falls back to creating a full-fledged iterator in
1098 # general. That is much slower than simply accessing and storing the
1103 # general. That is much slower than simply accessing and storing the
1099 # tuple members one by one.
1104 # tuple members one by one.
1100 t = dmap[fn]
1105 t = dmap[fn]
1101 state = t[0]
1106 state = t[0]
1102 mode = t[1]
1107 mode = t[1]
1103 size = t[2]
1108 size = t[2]
1104 time = t[3]
1109 time = t[3]
1105
1110
1106 if not st and state in "nma":
1111 if not st and state in "nma":
1107 dadd(fn)
1112 dadd(fn)
1108 elif state == 'n':
1113 elif state == 'n':
1109 if (size >= 0 and
1114 if (size >= 0 and
1110 ((size != st.st_size and size != st.st_size & _rangemask)
1115 ((size != st.st_size and size != st.st_size & _rangemask)
1111 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1116 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1112 or size == -2 # other parent
1117 or size == -2 # other parent
1113 or fn in copymap):
1118 or fn in copymap):
1114 madd(fn)
1119 madd(fn)
1115 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1120 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1116 ladd(fn)
1121 ladd(fn)
1117 elif st.st_mtime == lastnormaltime:
1122 elif st.st_mtime == lastnormaltime:
1118 # fn may have just been marked as normal and it may have
1123 # fn may have just been marked as normal and it may have
1119 # changed in the same second without changing its size.
1124 # changed in the same second without changing its size.
1120 # This can happen if we quickly do multiple commits.
1125 # This can happen if we quickly do multiple commits.
1121 # Force lookup, so we don't miss such a racy file change.
1126 # Force lookup, so we don't miss such a racy file change.
1122 ladd(fn)
1127 ladd(fn)
1123 elif listclean:
1128 elif listclean:
1124 cadd(fn)
1129 cadd(fn)
1125 elif state == 'm':
1130 elif state == 'm':
1126 madd(fn)
1131 madd(fn)
1127 elif state == 'a':
1132 elif state == 'a':
1128 aadd(fn)
1133 aadd(fn)
1129 elif state == 'r':
1134 elif state == 'r':
1130 radd(fn)
1135 radd(fn)
1131
1136
1132 return (lookup, scmutil.status(modified, added, removed, deleted,
1137 return (lookup, scmutil.status(modified, added, removed, deleted,
1133 unknown, ignored, clean))
1138 unknown, ignored, clean))
1134
1139
def matches(self, match):
    '''
    return files in the dirstate (in whatever state) filtered by match
    '''
    dmap = self._map
    if match.always():
        # trivial matcher: every tracked file matches
        return dmap.keys()
    candidates = match.files()
    if match.isexact():
        # fast path -- filter the other way around, since typically the
        # explicit file list is much smaller than dmap
        return [f for f in candidates if f in dmap]
    if match.prefix() and all(f in dmap for f in candidates):
        # fast path -- every pattern names a tracked file, so the match
        # is exactly the listed files
        return list(candidates)
    # slow path: test each tracked file against the matcher
    return [f for f in dmap if match(f)]
1152
1157
1153 def _actualfilename(self, tr):
1158 def _actualfilename(self, tr):
1154 if tr:
1159 if tr:
1155 return self._pendingfilename
1160 return self._pendingfilename
1156 else:
1161 else:
1157 return self._filename
1162 return self._filename
1158
1163
def _savebackup(self, tr, suffix):
    '''Save current dirstate into backup file with suffix

    tr is the active transaction (or None); it selects whether the
    pending or the plain dirstate file is the one being backed up.
    '''
    filename = self._actualfilename(tr)

    # use '_writedirstate' instead of 'write' to write changes certainly,
    # because the latter omits writing out if transaction is running.
    # output file will be used to create backup of dirstate at this point.
    self._writedirstate(self._opener(filename, "w", atomictemp=True))

    if tr:
        # ensure that subsequent tr.writepending returns True for
        # changes written out above, even if dirstate is never
        # changed after this
        tr.addfilegenerator('dirstate', (self._filename,),
                            self._writedirstate, location='plain')

        # ensure that pending file written above is unlinked at
        # failure, even if tr.writepending isn't invoked until the
        # end of this transaction
        tr.registertmp(filename, location='plain')

    # finally, copy the just-written dirstate into the backup location
    self._opener.write(filename + suffix, self._opener.tryread(filename))
1181
1186
1182 def _restorebackup(self, tr, suffix):
1187 def _restorebackup(self, tr, suffix):
1183 '''Restore dirstate by backup file with suffix'''
1188 '''Restore dirstate by backup file with suffix'''
1184 # this "invalidate()" prevents "wlock.release()" from writing
1189 # this "invalidate()" prevents "wlock.release()" from writing
1185 # changes of dirstate out after restoring from backup file
1190 # changes of dirstate out after restoring from backup file
1186 self.invalidate()
1191 self.invalidate()
1187 filename = self._actualfilename(tr)
1192 filename = self._actualfilename(tr)
1188 self._opener.rename(filename + suffix, filename)
1193 self._opener.rename(filename + suffix, filename)
1189
1194
1190 def _clearbackup(self, tr, suffix):
1195 def _clearbackup(self, tr, suffix):
1191 '''Clear backup file with suffix'''
1196 '''Clear backup file with suffix'''
1192 filename = self._actualfilename(tr)
1197 filename = self._actualfilename(tr)
1193 self._opener.unlink(filename + suffix)
1198 self._opener.unlink(filename + suffix)
General Comments 0
You need to be logged in to leave comments. Login now