##// END OF EJS Templates
dirstate: attach the nonnormalset to a propertycache...
Laurent Charignon -
r27589:3e4f9d78 default
parent child Browse files
Show More
@@ -1,1198 +1,1202 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import stat
12 import stat
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import nullid
15 from .node import nullid
16 from . import (
16 from . import (
17 encoding,
17 encoding,
18 error,
18 error,
19 match as matchmod,
19 match as matchmod,
20 osutil,
20 osutil,
21 parsers,
21 parsers,
22 pathutil,
22 pathutil,
23 scmutil,
23 scmutil,
24 util,
24 util,
25 )
25 )
26
26
# Convenience aliases for heavily-used helpers.
propertycache = util.propertycache
filecache = scmutil.filecache
# Mask used to fit size/mtime values into a signed 31-bit field.
_rangemask = 0x7fffffff

# C-implemented dirstate entry tuple: (state, mode, size, mtime).
dirstatetuple = parsers.dirstatetuple
32
32
class repocache(filecache):
    """A filecache whose entries are files living under .hg/."""
    def join(self, obj, fname):
        # resolve fname through the repository's .hg opener
        return obj._opener.join(fname)
37
37
class rootcache(filecache):
    """A filecache whose entries are files in the repository root."""
    def join(self, obj, fname):
        # resolve fname relative to the working directory root
        return obj._join(fname)
42
42
43 def _getfsnow(vfs):
43 def _getfsnow(vfs):
44 '''Get "now" timestamp on filesystem'''
44 '''Get "now" timestamp on filesystem'''
45 tmpfd, tmpname = vfs.mkstemp()
45 tmpfd, tmpname = vfs.mkstemp()
46 try:
46 try:
47 return os.fstat(tmpfd).st_mtime
47 return os.fstat(tmpfd).st_mtime
48 finally:
48 finally:
49 os.close(tmpfd)
49 os.close(tmpfd)
50 vfs.unlink(tmpname)
50 vfs.unlink(tmpname)
51
51
def nonnormalentries(dmap):
    '''Compute the nonnormal dirstate entries from the dmap'''
    # An entry is "nonnormal" when its state is not 'n' (clean/normal) or
    # its recorded mtime is the "needs lookup" marker (-1).
    nonnorm = set()
    for fname, e in dmap.iteritems():
        if e[0] != 'n' or e[3] == -1:
            nonnorm.add(fname)
    return nonnorm
56
56
57 def _trypending(root, vfs, filename):
57 def _trypending(root, vfs, filename):
58 '''Open file to be read according to HG_PENDING environment variable
58 '''Open file to be read according to HG_PENDING environment variable
59
59
60 This opens '.pending' of specified 'filename' only when HG_PENDING
60 This opens '.pending' of specified 'filename' only when HG_PENDING
61 is equal to 'root'.
61 is equal to 'root'.
62
62
63 This returns '(fp, is_pending_opened)' tuple.
63 This returns '(fp, is_pending_opened)' tuple.
64 '''
64 '''
65 if root == os.environ.get('HG_PENDING'):
65 if root == os.environ.get('HG_PENDING'):
66 try:
66 try:
67 return (vfs('%s.pending' % filename), True)
67 return (vfs('%s.pending' % filename), True)
68 except IOError as inst:
68 except IOError as inst:
69 if inst.errno != errno.ENOENT:
69 if inst.errno != errno.ENOENT:
70 raise
70 raise
71 return (vfs(filename), False)
71 return (vfs(filename), False)
72
72
73 class dirstate(object):
73 class dirstate(object):
74
74
75 def __init__(self, opener, ui, root, validate):
75 def __init__(self, opener, ui, root, validate):
76 '''Create a new dirstate object.
76 '''Create a new dirstate object.
77
77
78 opener is an open()-like callable that can be used to open the
78 opener is an open()-like callable that can be used to open the
79 dirstate file; root is the root of the directory tracked by
79 dirstate file; root is the root of the directory tracked by
80 the dirstate.
80 the dirstate.
81 '''
81 '''
82 self._opener = opener
82 self._opener = opener
83 self._validate = validate
83 self._validate = validate
84 self._root = root
84 self._root = root
85 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
85 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
86 # UNC path pointing to root share (issue4557)
86 # UNC path pointing to root share (issue4557)
87 self._rootdir = pathutil.normasprefix(root)
87 self._rootdir = pathutil.normasprefix(root)
88 # internal config: ui.forcecwd
88 # internal config: ui.forcecwd
89 forcecwd = ui.config('ui', 'forcecwd')
89 forcecwd = ui.config('ui', 'forcecwd')
90 if forcecwd:
90 if forcecwd:
91 self._cwd = forcecwd
91 self._cwd = forcecwd
92 self._dirty = False
92 self._dirty = False
93 self._dirtypl = False
93 self._dirtypl = False
94 self._lastnormaltime = 0
94 self._lastnormaltime = 0
95 self._ui = ui
95 self._ui = ui
96 self._filecache = {}
96 self._filecache = {}
97 self._parentwriters = 0
97 self._parentwriters = 0
98 self._filename = 'dirstate'
98 self._filename = 'dirstate'
99 self._pendingfilename = '%s.pending' % self._filename
99 self._pendingfilename = '%s.pending' % self._filename
100
100
101 # for consistent view between _pl() and _read() invocations
101 # for consistent view between _pl() and _read() invocations
102 self._pendingmode = None
102 self._pendingmode = None
103
103
104 def beginparentchange(self):
104 def beginparentchange(self):
105 '''Marks the beginning of a set of changes that involve changing
105 '''Marks the beginning of a set of changes that involve changing
106 the dirstate parents. If there is an exception during this time,
106 the dirstate parents. If there is an exception during this time,
107 the dirstate will not be written when the wlock is released. This
107 the dirstate will not be written when the wlock is released. This
108 prevents writing an incoherent dirstate where the parent doesn't
108 prevents writing an incoherent dirstate where the parent doesn't
109 match the contents.
109 match the contents.
110 '''
110 '''
111 self._parentwriters += 1
111 self._parentwriters += 1
112
112
113 def endparentchange(self):
113 def endparentchange(self):
114 '''Marks the end of a set of changes that involve changing the
114 '''Marks the end of a set of changes that involve changing the
115 dirstate parents. Once all parent changes have been marked done,
115 dirstate parents. Once all parent changes have been marked done,
116 the wlock will be free to write the dirstate on release.
116 the wlock will be free to write the dirstate on release.
117 '''
117 '''
118 if self._parentwriters > 0:
118 if self._parentwriters > 0:
119 self._parentwriters -= 1
119 self._parentwriters -= 1
120
120
121 def pendingparentchange(self):
121 def pendingparentchange(self):
122 '''Returns true if the dirstate is in the middle of a set of changes
122 '''Returns true if the dirstate is in the middle of a set of changes
123 that modify the dirstate parent.
123 that modify the dirstate parent.
124 '''
124 '''
125 return self._parentwriters > 0
125 return self._parentwriters > 0
126
126
127 @propertycache
127 @propertycache
128 def _map(self):
128 def _map(self):
129 '''Return the dirstate contents as a map from filename to
129 '''Return the dirstate contents as a map from filename to
130 (state, mode, size, time).'''
130 (state, mode, size, time).'''
131 self._read()
131 self._read()
132 return self._map
132 return self._map
133
133
134 @propertycache
134 @propertycache
135 def _copymap(self):
135 def _copymap(self):
136 self._read()
136 self._read()
137 return self._copymap
137 return self._copymap
138
138
139 @propertycache
139 @propertycache
140 def _nonnormalset(self):
141 return nonnormalentries(self._map)
142
143 @propertycache
140 def _filefoldmap(self):
144 def _filefoldmap(self):
141 try:
145 try:
142 makefilefoldmap = parsers.make_file_foldmap
146 makefilefoldmap = parsers.make_file_foldmap
143 except AttributeError:
147 except AttributeError:
144 pass
148 pass
145 else:
149 else:
146 return makefilefoldmap(self._map, util.normcasespec,
150 return makefilefoldmap(self._map, util.normcasespec,
147 util.normcasefallback)
151 util.normcasefallback)
148
152
149 f = {}
153 f = {}
150 normcase = util.normcase
154 normcase = util.normcase
151 for name, s in self._map.iteritems():
155 for name, s in self._map.iteritems():
152 if s[0] != 'r':
156 if s[0] != 'r':
153 f[normcase(name)] = name
157 f[normcase(name)] = name
154 f['.'] = '.' # prevents useless util.fspath() invocation
158 f['.'] = '.' # prevents useless util.fspath() invocation
155 return f
159 return f
156
160
157 @propertycache
161 @propertycache
158 def _dirfoldmap(self):
162 def _dirfoldmap(self):
159 f = {}
163 f = {}
160 normcase = util.normcase
164 normcase = util.normcase
161 for name in self._dirs:
165 for name in self._dirs:
162 f[normcase(name)] = name
166 f[normcase(name)] = name
163 return f
167 return f
164
168
165 @repocache('branch')
169 @repocache('branch')
166 def _branch(self):
170 def _branch(self):
167 try:
171 try:
168 return self._opener.read("branch").strip() or "default"
172 return self._opener.read("branch").strip() or "default"
169 except IOError as inst:
173 except IOError as inst:
170 if inst.errno != errno.ENOENT:
174 if inst.errno != errno.ENOENT:
171 raise
175 raise
172 return "default"
176 return "default"
173
177
174 @propertycache
178 @propertycache
175 def _pl(self):
179 def _pl(self):
176 try:
180 try:
177 fp = self._opendirstatefile()
181 fp = self._opendirstatefile()
178 st = fp.read(40)
182 st = fp.read(40)
179 fp.close()
183 fp.close()
180 l = len(st)
184 l = len(st)
181 if l == 40:
185 if l == 40:
182 return st[:20], st[20:40]
186 return st[:20], st[20:40]
183 elif l > 0 and l < 40:
187 elif l > 0 and l < 40:
184 raise error.Abort(_('working directory state appears damaged!'))
188 raise error.Abort(_('working directory state appears damaged!'))
185 except IOError as err:
189 except IOError as err:
186 if err.errno != errno.ENOENT:
190 if err.errno != errno.ENOENT:
187 raise
191 raise
188 return [nullid, nullid]
192 return [nullid, nullid]
189
193
190 @propertycache
194 @propertycache
191 def _dirs(self):
195 def _dirs(self):
192 return util.dirs(self._map, 'r')
196 return util.dirs(self._map, 'r')
193
197
194 def dirs(self):
198 def dirs(self):
195 return self._dirs
199 return self._dirs
196
200
197 @rootcache('.hgignore')
201 @rootcache('.hgignore')
198 def _ignore(self):
202 def _ignore(self):
199 files = []
203 files = []
200 if os.path.exists(self._join('.hgignore')):
204 if os.path.exists(self._join('.hgignore')):
201 files.append(self._join('.hgignore'))
205 files.append(self._join('.hgignore'))
202 for name, path in self._ui.configitems("ui"):
206 for name, path in self._ui.configitems("ui"):
203 if name == 'ignore' or name.startswith('ignore.'):
207 if name == 'ignore' or name.startswith('ignore.'):
204 # we need to use os.path.join here rather than self._join
208 # we need to use os.path.join here rather than self._join
205 # because path is arbitrary and user-specified
209 # because path is arbitrary and user-specified
206 files.append(os.path.join(self._rootdir, util.expandpath(path)))
210 files.append(os.path.join(self._rootdir, util.expandpath(path)))
207
211
208 if not files:
212 if not files:
209 return util.never
213 return util.never
210
214
211 pats = ['include:%s' % f for f in files]
215 pats = ['include:%s' % f for f in files]
212 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
216 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
213
217
214 @propertycache
218 @propertycache
215 def _slash(self):
219 def _slash(self):
216 return self._ui.configbool('ui', 'slash') and os.sep != '/'
220 return self._ui.configbool('ui', 'slash') and os.sep != '/'
217
221
218 @propertycache
222 @propertycache
219 def _checklink(self):
223 def _checklink(self):
220 return util.checklink(self._root)
224 return util.checklink(self._root)
221
225
222 @propertycache
226 @propertycache
223 def _checkexec(self):
227 def _checkexec(self):
224 return util.checkexec(self._root)
228 return util.checkexec(self._root)
225
229
226 @propertycache
230 @propertycache
227 def _checkcase(self):
231 def _checkcase(self):
228 return not util.checkcase(self._join('.hg'))
232 return not util.checkcase(self._join('.hg'))
229
233
230 def _join(self, f):
234 def _join(self, f):
231 # much faster than os.path.join()
235 # much faster than os.path.join()
232 # it's safe because f is always a relative path
236 # it's safe because f is always a relative path
233 return self._rootdir + f
237 return self._rootdir + f
234
238
235 def flagfunc(self, buildfallback):
239 def flagfunc(self, buildfallback):
236 if self._checklink and self._checkexec:
240 if self._checklink and self._checkexec:
237 def f(x):
241 def f(x):
238 try:
242 try:
239 st = os.lstat(self._join(x))
243 st = os.lstat(self._join(x))
240 if util.statislink(st):
244 if util.statislink(st):
241 return 'l'
245 return 'l'
242 if util.statisexec(st):
246 if util.statisexec(st):
243 return 'x'
247 return 'x'
244 except OSError:
248 except OSError:
245 pass
249 pass
246 return ''
250 return ''
247 return f
251 return f
248
252
249 fallback = buildfallback()
253 fallback = buildfallback()
250 if self._checklink:
254 if self._checklink:
251 def f(x):
255 def f(x):
252 if os.path.islink(self._join(x)):
256 if os.path.islink(self._join(x)):
253 return 'l'
257 return 'l'
254 if 'x' in fallback(x):
258 if 'x' in fallback(x):
255 return 'x'
259 return 'x'
256 return ''
260 return ''
257 return f
261 return f
258 if self._checkexec:
262 if self._checkexec:
259 def f(x):
263 def f(x):
260 if 'l' in fallback(x):
264 if 'l' in fallback(x):
261 return 'l'
265 return 'l'
262 if util.isexec(self._join(x)):
266 if util.isexec(self._join(x)):
263 return 'x'
267 return 'x'
264 return ''
268 return ''
265 return f
269 return f
266 else:
270 else:
267 return fallback
271 return fallback
268
272
269 @propertycache
273 @propertycache
270 def _cwd(self):
274 def _cwd(self):
271 return os.getcwd()
275 return os.getcwd()
272
276
273 def getcwd(self):
277 def getcwd(self):
274 '''Return the path from which a canonical path is calculated.
278 '''Return the path from which a canonical path is calculated.
275
279
276 This path should be used to resolve file patterns or to convert
280 This path should be used to resolve file patterns or to convert
277 canonical paths back to file paths for display. It shouldn't be
281 canonical paths back to file paths for display. It shouldn't be
278 used to get real file paths. Use vfs functions instead.
282 used to get real file paths. Use vfs functions instead.
279 '''
283 '''
280 cwd = self._cwd
284 cwd = self._cwd
281 if cwd == self._root:
285 if cwd == self._root:
282 return ''
286 return ''
283 # self._root ends with a path separator if self._root is '/' or 'C:\'
287 # self._root ends with a path separator if self._root is '/' or 'C:\'
284 rootsep = self._root
288 rootsep = self._root
285 if not util.endswithsep(rootsep):
289 if not util.endswithsep(rootsep):
286 rootsep += os.sep
290 rootsep += os.sep
287 if cwd.startswith(rootsep):
291 if cwd.startswith(rootsep):
288 return cwd[len(rootsep):]
292 return cwd[len(rootsep):]
289 else:
293 else:
290 # we're outside the repo. return an absolute path.
294 # we're outside the repo. return an absolute path.
291 return cwd
295 return cwd
292
296
293 def pathto(self, f, cwd=None):
297 def pathto(self, f, cwd=None):
294 if cwd is None:
298 if cwd is None:
295 cwd = self.getcwd()
299 cwd = self.getcwd()
296 path = util.pathto(self._root, cwd, f)
300 path = util.pathto(self._root, cwd, f)
297 if self._slash:
301 if self._slash:
298 return util.pconvert(path)
302 return util.pconvert(path)
299 return path
303 return path
300
304
301 def __getitem__(self, key):
305 def __getitem__(self, key):
302 '''Return the current state of key (a filename) in the dirstate.
306 '''Return the current state of key (a filename) in the dirstate.
303
307
304 States are:
308 States are:
305 n normal
309 n normal
306 m needs merging
310 m needs merging
307 r marked for removal
311 r marked for removal
308 a marked for addition
312 a marked for addition
309 ? not tracked
313 ? not tracked
310 '''
314 '''
311 return self._map.get(key, ("?",))[0]
315 return self._map.get(key, ("?",))[0]
312
316
313 def __contains__(self, key):
317 def __contains__(self, key):
314 return key in self._map
318 return key in self._map
315
319
316 def __iter__(self):
320 def __iter__(self):
317 for x in sorted(self._map):
321 for x in sorted(self._map):
318 yield x
322 yield x
319
323
320 def iteritems(self):
324 def iteritems(self):
321 return self._map.iteritems()
325 return self._map.iteritems()
322
326
323 def parents(self):
327 def parents(self):
324 return [self._validate(p) for p in self._pl]
328 return [self._validate(p) for p in self._pl]
325
329
326 def p1(self):
330 def p1(self):
327 return self._validate(self._pl[0])
331 return self._validate(self._pl[0])
328
332
329 def p2(self):
333 def p2(self):
330 return self._validate(self._pl[1])
334 return self._validate(self._pl[1])
331
335
332 def branch(self):
336 def branch(self):
333 return encoding.tolocal(self._branch)
337 return encoding.tolocal(self._branch)
334
338
335 def setparents(self, p1, p2=nullid):
339 def setparents(self, p1, p2=nullid):
336 """Set dirstate parents to p1 and p2.
340 """Set dirstate parents to p1 and p2.
337
341
338 When moving from two parents to one, 'm' merged entries a
342 When moving from two parents to one, 'm' merged entries a
339 adjusted to normal and previous copy records discarded and
343 adjusted to normal and previous copy records discarded and
340 returned by the call.
344 returned by the call.
341
345
342 See localrepo.setparents()
346 See localrepo.setparents()
343 """
347 """
344 if self._parentwriters == 0:
348 if self._parentwriters == 0:
345 raise ValueError("cannot set dirstate parent without "
349 raise ValueError("cannot set dirstate parent without "
346 "calling dirstate.beginparentchange")
350 "calling dirstate.beginparentchange")
347
351
348 self._dirty = self._dirtypl = True
352 self._dirty = self._dirtypl = True
349 oldp2 = self._pl[1]
353 oldp2 = self._pl[1]
350 self._pl = p1, p2
354 self._pl = p1, p2
351 copies = {}
355 copies = {}
352 if oldp2 != nullid and p2 == nullid:
356 if oldp2 != nullid and p2 == nullid:
353 for f, s in self._map.iteritems():
357 for f, s in self._map.iteritems():
354 # Discard 'm' markers when moving away from a merge state
358 # Discard 'm' markers when moving away from a merge state
355 if s[0] == 'm':
359 if s[0] == 'm':
356 if f in self._copymap:
360 if f in self._copymap:
357 copies[f] = self._copymap[f]
361 copies[f] = self._copymap[f]
358 self.normallookup(f)
362 self.normallookup(f)
359 # Also fix up otherparent markers
363 # Also fix up otherparent markers
360 elif s[0] == 'n' and s[2] == -2:
364 elif s[0] == 'n' and s[2] == -2:
361 if f in self._copymap:
365 if f in self._copymap:
362 copies[f] = self._copymap[f]
366 copies[f] = self._copymap[f]
363 self.add(f)
367 self.add(f)
364 return copies
368 return copies
365
369
366 def setbranch(self, branch):
370 def setbranch(self, branch):
367 self._branch = encoding.fromlocal(branch)
371 self._branch = encoding.fromlocal(branch)
368 f = self._opener('branch', 'w', atomictemp=True)
372 f = self._opener('branch', 'w', atomictemp=True)
369 try:
373 try:
370 f.write(self._branch + '\n')
374 f.write(self._branch + '\n')
371 f.close()
375 f.close()
372
376
373 # make sure filecache has the correct stat info for _branch after
377 # make sure filecache has the correct stat info for _branch after
374 # replacing the underlying file
378 # replacing the underlying file
375 ce = self._filecache['_branch']
379 ce = self._filecache['_branch']
376 if ce:
380 if ce:
377 ce.refresh()
381 ce.refresh()
378 except: # re-raises
382 except: # re-raises
379 f.discard()
383 f.discard()
380 raise
384 raise
381
385
382 def _opendirstatefile(self):
386 def _opendirstatefile(self):
383 fp, mode = _trypending(self._root, self._opener, self._filename)
387 fp, mode = _trypending(self._root, self._opener, self._filename)
384 if self._pendingmode is not None and self._pendingmode != mode:
388 if self._pendingmode is not None and self._pendingmode != mode:
385 fp.close()
389 fp.close()
386 raise error.Abort(_('working directory state may be '
390 raise error.Abort(_('working directory state may be '
387 'changed parallelly'))
391 'changed parallelly'))
388 self._pendingmode = mode
392 self._pendingmode = mode
389 return fp
393 return fp
390
394
391 def _read(self):
395 def _read(self):
392 self._map = {}
396 self._map = {}
393 self._copymap = {}
397 self._copymap = {}
394 try:
398 try:
395 fp = self._opendirstatefile()
399 fp = self._opendirstatefile()
396 try:
400 try:
397 st = fp.read()
401 st = fp.read()
398 finally:
402 finally:
399 fp.close()
403 fp.close()
400 except IOError as err:
404 except IOError as err:
401 if err.errno != errno.ENOENT:
405 if err.errno != errno.ENOENT:
402 raise
406 raise
403 return
407 return
404 if not st:
408 if not st:
405 return
409 return
406
410
407 if util.safehasattr(parsers, 'dict_new_presized'):
411 if util.safehasattr(parsers, 'dict_new_presized'):
408 # Make an estimate of the number of files in the dirstate based on
412 # Make an estimate of the number of files in the dirstate based on
409 # its size. From a linear regression on a set of real-world repos,
413 # its size. From a linear regression on a set of real-world repos,
410 # all over 10,000 files, the size of a dirstate entry is 85
414 # all over 10,000 files, the size of a dirstate entry is 85
411 # bytes. The cost of resizing is significantly higher than the cost
415 # bytes. The cost of resizing is significantly higher than the cost
412 # of filling in a larger presized dict, so subtract 20% from the
416 # of filling in a larger presized dict, so subtract 20% from the
413 # size.
417 # size.
414 #
418 #
415 # This heuristic is imperfect in many ways, so in a future dirstate
419 # This heuristic is imperfect in many ways, so in a future dirstate
416 # format update it makes sense to just record the number of entries
420 # format update it makes sense to just record the number of entries
417 # on write.
421 # on write.
418 self._map = parsers.dict_new_presized(len(st) / 71)
422 self._map = parsers.dict_new_presized(len(st) / 71)
419
423
420 # Python's garbage collector triggers a GC each time a certain number
424 # Python's garbage collector triggers a GC each time a certain number
421 # of container objects (the number being defined by
425 # of container objects (the number being defined by
422 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
426 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
423 # for each file in the dirstate. The C version then immediately marks
427 # for each file in the dirstate. The C version then immediately marks
424 # them as not to be tracked by the collector. However, this has no
428 # them as not to be tracked by the collector. However, this has no
425 # effect on when GCs are triggered, only on what objects the GC looks
429 # effect on when GCs are triggered, only on what objects the GC looks
426 # into. This means that O(number of files) GCs are unavoidable.
430 # into. This means that O(number of files) GCs are unavoidable.
427 # Depending on when in the process's lifetime the dirstate is parsed,
431 # Depending on when in the process's lifetime the dirstate is parsed,
428 # this can get very expensive. As a workaround, disable GC while
432 # this can get very expensive. As a workaround, disable GC while
429 # parsing the dirstate.
433 # parsing the dirstate.
430 #
434 #
431 # (we cannot decorate the function directly since it is in a C module)
435 # (we cannot decorate the function directly since it is in a C module)
432 parse_dirstate = util.nogc(parsers.parse_dirstate)
436 parse_dirstate = util.nogc(parsers.parse_dirstate)
433 p = parse_dirstate(self._map, self._copymap, st)
437 p = parse_dirstate(self._map, self._copymap, st)
434 if not self._dirtypl:
438 if not self._dirtypl:
435 self._pl = p
439 self._pl = p
436
440
437 def invalidate(self):
441 def invalidate(self):
438 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
442 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
439 "_pl", "_dirs", "_ignore"):
443 "_pl", "_dirs", "_ignore"):
440 if a in self.__dict__:
444 if a in self.__dict__:
441 delattr(self, a)
445 delattr(self, a)
442 self._lastnormaltime = 0
446 self._lastnormaltime = 0
443 self._dirty = False
447 self._dirty = False
444 self._parentwriters = 0
448 self._parentwriters = 0
445
449
446 def copy(self, source, dest):
450 def copy(self, source, dest):
447 """Mark dest as a copy of source. Unmark dest if source is None."""
451 """Mark dest as a copy of source. Unmark dest if source is None."""
448 if source == dest:
452 if source == dest:
449 return
453 return
450 self._dirty = True
454 self._dirty = True
451 if source is not None:
455 if source is not None:
452 self._copymap[dest] = source
456 self._copymap[dest] = source
453 elif dest in self._copymap:
457 elif dest in self._copymap:
454 del self._copymap[dest]
458 del self._copymap[dest]
455
459
456 def copied(self, file):
460 def copied(self, file):
457 return self._copymap.get(file, None)
461 return self._copymap.get(file, None)
458
462
459 def copies(self):
463 def copies(self):
460 return self._copymap
464 return self._copymap
461
465
462 def _droppath(self, f):
466 def _droppath(self, f):
463 if self[f] not in "?r" and "_dirs" in self.__dict__:
467 if self[f] not in "?r" and "_dirs" in self.__dict__:
464 self._dirs.delpath(f)
468 self._dirs.delpath(f)
465
469
466 if "_filefoldmap" in self.__dict__:
470 if "_filefoldmap" in self.__dict__:
467 normed = util.normcase(f)
471 normed = util.normcase(f)
468 if normed in self._filefoldmap:
472 if normed in self._filefoldmap:
469 del self._filefoldmap[normed]
473 del self._filefoldmap[normed]
470
474
471 def _addpath(self, f, state, mode, size, mtime):
475 def _addpath(self, f, state, mode, size, mtime):
472 oldstate = self[f]
476 oldstate = self[f]
473 if state == 'a' or oldstate == 'r':
477 if state == 'a' or oldstate == 'r':
474 scmutil.checkfilename(f)
478 scmutil.checkfilename(f)
475 if f in self._dirs:
479 if f in self._dirs:
476 raise error.Abort(_('directory %r already in dirstate') % f)
480 raise error.Abort(_('directory %r already in dirstate') % f)
477 # shadows
481 # shadows
478 for d in util.finddirs(f):
482 for d in util.finddirs(f):
479 if d in self._dirs:
483 if d in self._dirs:
480 break
484 break
481 if d in self._map and self[d] != 'r':
485 if d in self._map and self[d] != 'r':
482 raise error.Abort(
486 raise error.Abort(
483 _('file %r in dirstate clashes with %r') % (d, f))
487 _('file %r in dirstate clashes with %r') % (d, f))
484 if oldstate in "?r" and "_dirs" in self.__dict__:
488 if oldstate in "?r" and "_dirs" in self.__dict__:
485 self._dirs.addpath(f)
489 self._dirs.addpath(f)
486 self._dirty = True
490 self._dirty = True
487 self._map[f] = dirstatetuple(state, mode, size, mtime)
491 self._map[f] = dirstatetuple(state, mode, size, mtime)
488
492
489 def normal(self, f):
493 def normal(self, f):
490 '''Mark a file normal and clean.'''
494 '''Mark a file normal and clean.'''
491 s = os.lstat(self._join(f))
495 s = os.lstat(self._join(f))
492 mtime = s.st_mtime
496 mtime = s.st_mtime
493 self._addpath(f, 'n', s.st_mode,
497 self._addpath(f, 'n', s.st_mode,
494 s.st_size & _rangemask, mtime & _rangemask)
498 s.st_size & _rangemask, mtime & _rangemask)
495 if f in self._copymap:
499 if f in self._copymap:
496 del self._copymap[f]
500 del self._copymap[f]
497 if mtime > self._lastnormaltime:
501 if mtime > self._lastnormaltime:
498 # Remember the most recent modification timeslot for status(),
502 # Remember the most recent modification timeslot for status(),
499 # to make sure we won't miss future size-preserving file content
503 # to make sure we won't miss future size-preserving file content
500 # modifications that happen within the same timeslot.
504 # modifications that happen within the same timeslot.
501 self._lastnormaltime = mtime
505 self._lastnormaltime = mtime
502
506
503 def normallookup(self, f):
507 def normallookup(self, f):
504 '''Mark a file normal, but possibly dirty.'''
508 '''Mark a file normal, but possibly dirty.'''
505 if self._pl[1] != nullid and f in self._map:
509 if self._pl[1] != nullid and f in self._map:
506 # if there is a merge going on and the file was either
510 # if there is a merge going on and the file was either
507 # in state 'm' (-1) or coming from other parent (-2) before
511 # in state 'm' (-1) or coming from other parent (-2) before
508 # being removed, restore that state.
512 # being removed, restore that state.
509 entry = self._map[f]
513 entry = self._map[f]
510 if entry[0] == 'r' and entry[2] in (-1, -2):
514 if entry[0] == 'r' and entry[2] in (-1, -2):
511 source = self._copymap.get(f)
515 source = self._copymap.get(f)
512 if entry[2] == -1:
516 if entry[2] == -1:
513 self.merge(f)
517 self.merge(f)
514 elif entry[2] == -2:
518 elif entry[2] == -2:
515 self.otherparent(f)
519 self.otherparent(f)
516 if source:
520 if source:
517 self.copy(source, f)
521 self.copy(source, f)
518 return
522 return
519 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
523 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
520 return
524 return
521 self._addpath(f, 'n', 0, -1, -1)
525 self._addpath(f, 'n', 0, -1, -1)
522 if f in self._copymap:
526 if f in self._copymap:
523 del self._copymap[f]
527 del self._copymap[f]
524
528
525 def otherparent(self, f):
529 def otherparent(self, f):
526 '''Mark as coming from the other parent, always dirty.'''
530 '''Mark as coming from the other parent, always dirty.'''
527 if self._pl[1] == nullid:
531 if self._pl[1] == nullid:
528 raise error.Abort(_("setting %r to other parent "
532 raise error.Abort(_("setting %r to other parent "
529 "only allowed in merges") % f)
533 "only allowed in merges") % f)
530 if f in self and self[f] == 'n':
534 if f in self and self[f] == 'n':
531 # merge-like
535 # merge-like
532 self._addpath(f, 'm', 0, -2, -1)
536 self._addpath(f, 'm', 0, -2, -1)
533 else:
537 else:
534 # add-like
538 # add-like
535 self._addpath(f, 'n', 0, -2, -1)
539 self._addpath(f, 'n', 0, -2, -1)
536
540
537 if f in self._copymap:
541 if f in self._copymap:
538 del self._copymap[f]
542 del self._copymap[f]
539
543
540 def add(self, f):
544 def add(self, f):
541 '''Mark a file added.'''
545 '''Mark a file added.'''
542 self._addpath(f, 'a', 0, -1, -1)
546 self._addpath(f, 'a', 0, -1, -1)
543 if f in self._copymap:
547 if f in self._copymap:
544 del self._copymap[f]
548 del self._copymap[f]
545
549
    def remove(self, f):
        '''Mark a file removed.

        During a merge (second parent set), the pre-removal state is
        encoded in the size field so it can be recovered later:
        -1 for a merged file, -2 for an other-parent file.
        '''
        self._dirty = True
        self._droppath(f)
        size = 0
        if self._pl[1] != nullid and f in self._map:
            # backup the previous state
            entry = self._map[f]
            if entry[0] == 'm': # merge
                size = -1
            elif entry[0] == 'n' and entry[2] == -2: # other parent
                size = -2
        self._map[f] = dirstatetuple('r', 0, size, 0)
        # only forget the copy source when no merge state was backed up
        if size == 0 and f in self._copymap:
            del self._copymap[f]

562 def merge(self, f):
566 def merge(self, f):
563 '''Mark a file merged.'''
567 '''Mark a file merged.'''
564 if self._pl[1] == nullid:
568 if self._pl[1] == nullid:
565 return self.normallookup(f)
569 return self.normallookup(f)
566 return self.otherparent(f)
570 return self.otherparent(f)
567
571
568 def drop(self, f):
572 def drop(self, f):
569 '''Drop a file from the dirstate'''
573 '''Drop a file from the dirstate'''
570 if f in self._map:
574 if f in self._map:
571 self._dirty = True
575 self._dirty = True
572 self._droppath(f)
576 self._droppath(f)
573 del self._map[f]
577 del self._map[f]
574
578
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        '''Discover the on-disk case of *path* and cache it in *storemap*.

        path: path as given by the caller; normed: its casefolded form.
        exists may be precomputed by the caller (None means "stat it here").
        Results for existing paths are memoized in storemap; missing paths
        are not cached since they may appear later with a different case.
        '''
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and '/' in path:
                d, f = path.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + "/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if '/' in normed:
                d, f = normed.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + "/" + d
                folded = d + "/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded

601 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
605 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
602 normed = util.normcase(path)
606 normed = util.normcase(path)
603 folded = self._filefoldmap.get(normed, None)
607 folded = self._filefoldmap.get(normed, None)
604 if folded is None:
608 if folded is None:
605 if isknown:
609 if isknown:
606 folded = path
610 folded = path
607 else:
611 else:
608 folded = self._discoverpath(path, normed, ignoremissing, exists,
612 folded = self._discoverpath(path, normed, ignoremissing, exists,
609 self._filefoldmap)
613 self._filefoldmap)
610 return folded
614 return folded
611
615
612 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
616 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
613 normed = util.normcase(path)
617 normed = util.normcase(path)
614 folded = self._filefoldmap.get(normed, None)
618 folded = self._filefoldmap.get(normed, None)
615 if folded is None:
619 if folded is None:
616 folded = self._dirfoldmap.get(normed, None)
620 folded = self._dirfoldmap.get(normed, None)
617 if folded is None:
621 if folded is None:
618 if isknown:
622 if isknown:
619 folded = path
623 folded = path
620 else:
624 else:
621 # store discovered result in dirfoldmap so that future
625 # store discovered result in dirfoldmap so that future
622 # normalizefile calls don't start matching directories
626 # normalizefile calls don't start matching directories
623 folded = self._discoverpath(path, normed, ignoremissing, exists,
627 folded = self._discoverpath(path, normed, ignoremissing, exists,
624 self._dirfoldmap)
628 self._dirfoldmap)
625 return folded
629 return folded
626
630
627 def normalize(self, path, isknown=False, ignoremissing=False):
631 def normalize(self, path, isknown=False, ignoremissing=False):
628 '''
632 '''
629 normalize the case of a pathname when on a casefolding filesystem
633 normalize the case of a pathname when on a casefolding filesystem
630
634
631 isknown specifies whether the filename came from walking the
635 isknown specifies whether the filename came from walking the
632 disk, to avoid extra filesystem access.
636 disk, to avoid extra filesystem access.
633
637
634 If ignoremissing is True, missing path are returned
638 If ignoremissing is True, missing path are returned
635 unchanged. Otherwise, we try harder to normalize possibly
639 unchanged. Otherwise, we try harder to normalize possibly
636 existing path components.
640 existing path components.
637
641
638 The normalized case is determined based on the following precedence:
642 The normalized case is determined based on the following precedence:
639
643
640 - version of name already stored in the dirstate
644 - version of name already stored in the dirstate
641 - version of name stored on disk
645 - version of name stored on disk
642 - version provided via command arguments
646 - version provided via command arguments
643 '''
647 '''
644
648
645 if self._checkcase:
649 if self._checkcase:
646 return self._normalize(path, isknown, ignoremissing)
650 return self._normalize(path, isknown, ignoremissing)
647 return path
651 return path
648
652
649 def clear(self):
653 def clear(self):
650 self._map = {}
654 self._map = {}
651 if "_dirs" in self.__dict__:
655 if "_dirs" in self.__dict__:
652 delattr(self, "_dirs")
656 delattr(self, "_dirs")
653 self._copymap = {}
657 self._copymap = {}
654 self._pl = [nullid, nullid]
658 self._pl = [nullid, nullid]
655 self._lastnormaltime = 0
659 self._lastnormaltime = 0
656 self._dirty = True
660 self._dirty = True
657
661
658 def rebuild(self, parent, allfiles, changedfiles=None):
662 def rebuild(self, parent, allfiles, changedfiles=None):
659 if changedfiles is None:
663 if changedfiles is None:
660 # Rebuild entire dirstate
664 # Rebuild entire dirstate
661 changedfiles = allfiles
665 changedfiles = allfiles
662 lastnormaltime = self._lastnormaltime
666 lastnormaltime = self._lastnormaltime
663 self.clear()
667 self.clear()
664 self._lastnormaltime = lastnormaltime
668 self._lastnormaltime = lastnormaltime
665
669
666 for f in changedfiles:
670 for f in changedfiles:
667 mode = 0o666
671 mode = 0o666
668 if f in allfiles and 'x' in allfiles.flags(f):
672 if f in allfiles and 'x' in allfiles.flags(f):
669 mode = 0o777
673 mode = 0o777
670
674
671 if f in allfiles:
675 if f in allfiles:
672 self._map[f] = dirstatetuple('n', mode, -1, 0)
676 self._map[f] = dirstatetuple('n', mode, -1, 0)
673 else:
677 else:
674 self._map.pop(f, None)
678 self._map.pop(f, None)
675
679
676 self._pl = (parent, nullid)
680 self._pl = (parent, nullid)
677 self._dirty = True
681 self._dirty = True
678
682
    def write(self, tr=False):
        '''Write the dirstate out if it is dirty.

        tr is the current transaction (or False when the caller did not
        pass one). With a transaction, the actual write is delayed via a
        file generator; without one, the dirstate (or the pending file,
        if it exists) is written immediately.
        '''
        if not self._dirty:
            return

        filename = self._filename
        if tr is False: # not explicitly specified
            if (self._ui.configbool('devel', 'all-warnings')
                or self._ui.configbool('devel', 'check-dirstate-write')):
                self._ui.develwarn('use dirstate.write with '
                                   'repo.currenttransaction()')

            if self._opener.lexists(self._pendingfilename):
                # if pending file already exists, in-memory changes
                # should be written into it, because it has priority
                # to '.hg/dirstate' at reading under HG_PENDING mode
                filename = self._pendingfilename
        elif tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            dmap = self._map
            for f, e in dmap.iteritems():
                # 'n' entries whose mtime equals 'now' are ambiguous;
                # invalidate their timestamp so they are re-checked later
                if e[0] == 'n' and e[3] == now:
                    dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0

            # delay writing in-memory changes out
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')
            return

        # immediate write path: atomic replace of the dirstate file
        st = self._opener(filename, "w", atomictemp=True)
        self._writedirstate(st)

    def _writedirstate(self, st):
        '''Serialize the dirstate into the open file object *st*.

        Clears the dirty flags and _lastnormaltime once written.
        '''
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st).st_mtime & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in self._map.iteritems():
                if e[0] == 'n' and e[3] == now:
                    import time # to avoid useless import
                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    break

        st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
        st.close()
        self._lastnormaltime = 0
        self._dirty = self._dirtypl = False

746 def _dirignore(self, f):
750 def _dirignore(self, f):
747 if f == '.':
751 if f == '.':
748 return False
752 return False
749 if self._ignore(f):
753 if self._ignore(f):
750 return True
754 return True
751 for p in util.finddirs(f):
755 for p in util.finddirs(f):
752 if self._ignore(p):
756 if self._ignore(p):
753 return True
757 return True
754 return False
758 return False
755
759
    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            # human-readable description for a non-file/dir/symlink st_mode
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        # bind frequently-used attributes to locals for the loop below
        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo; both lists are
        # sorted so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or '.' in files:
            files = ['.']
        results = dict.fromkeys(subrepos)
        results['.hg'] = None

        alldirs = None
        for ff in files:
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['.']
            if normalize and ff != '.':
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if alldirs is None:
                        alldirs = util.dirs(dmap)
                    if nf in alldirs:
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, inst.strerror)

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group stat'ed results by their casefolded path
            for f, st in results.iteritems():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.iteritems():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(path, norm, True, None,
                                                    self._dirfoldmap)
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound

    def walk(self, match, subrepos, unknown, ignored, full=True):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        '''
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # select the ignore predicates according to what the caller wants
        # listed (ignored implies everything; neither implies nothing new)
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # bind hot attributes to locals for the traversal loops
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = osutil.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                nd = work.pop()
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd), inst.strerror)
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # placeholders added by _walkexplicit must not leak to the caller
        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = dmap.keys()
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                nf = iter(visit).next
                for st in util.statfiles([join(i) for i in visit]):
                    results[nf()] = st
        return results

1048 def status(self, match, subrepos, ignored, clean, unknown):
1052 def status(self, match, subrepos, ignored, clean, unknown):
1049 '''Determine the status of the working copy relative to the
1053 '''Determine the status of the working copy relative to the
1050 dirstate and return a pair of (unsure, status), where status is of type
1054 dirstate and return a pair of (unsure, status), where status is of type
1051 scmutil.status and:
1055 scmutil.status and:
1052
1056
1053 unsure:
1057 unsure:
1054 files that might have been modified since the dirstate was
1058 files that might have been modified since the dirstate was
1055 written, but need to be read to be sure (size is the same
1059 written, but need to be read to be sure (size is the same
1056 but mtime differs)
1060 but mtime differs)
1057 status.modified:
1061 status.modified:
1058 files that have definitely been modified since the dirstate
1062 files that have definitely been modified since the dirstate
1059 was written (different size or mode)
1063 was written (different size or mode)
1060 status.clean:
1064 status.clean:
1061 files that have definitely not been modified since the
1065 files that have definitely not been modified since the
1062 dirstate was written
1066 dirstate was written
1063 '''
1067 '''
1064 listignored, listclean, listunknown = ignored, clean, unknown
1068 listignored, listclean, listunknown = ignored, clean, unknown
1065 lookup, modified, added, unknown, ignored = [], [], [], [], []
1069 lookup, modified, added, unknown, ignored = [], [], [], [], []
1066 removed, deleted, clean = [], [], []
1070 removed, deleted, clean = [], [], []
1067
1071
1068 dmap = self._map
1072 dmap = self._map
1069 ladd = lookup.append # aka "unsure"
1073 ladd = lookup.append # aka "unsure"
1070 madd = modified.append
1074 madd = modified.append
1071 aadd = added.append
1075 aadd = added.append
1072 uadd = unknown.append
1076 uadd = unknown.append
1073 iadd = ignored.append
1077 iadd = ignored.append
1074 radd = removed.append
1078 radd = removed.append
1075 dadd = deleted.append
1079 dadd = deleted.append
1076 cadd = clean.append
1080 cadd = clean.append
1077 mexact = match.exact
1081 mexact = match.exact
1078 dirignore = self._dirignore
1082 dirignore = self._dirignore
1079 checkexec = self._checkexec
1083 checkexec = self._checkexec
1080 copymap = self._copymap
1084 copymap = self._copymap
1081 lastnormaltime = self._lastnormaltime
1085 lastnormaltime = self._lastnormaltime
1082
1086
1083 # We need to do full walks when either
1087 # We need to do full walks when either
1084 # - we're listing all clean files, or
1088 # - we're listing all clean files, or
1085 # - match.traversedir does something, because match.traversedir should
1089 # - match.traversedir does something, because match.traversedir should
1086 # be called for every dir in the working dir
1090 # be called for every dir in the working dir
1087 full = listclean or match.traversedir is not None
1091 full = listclean or match.traversedir is not None
1088 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1092 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1089 full=full).iteritems():
1093 full=full).iteritems():
1090 if fn not in dmap:
1094 if fn not in dmap:
1091 if (listignored or mexact(fn)) and dirignore(fn):
1095 if (listignored or mexact(fn)) and dirignore(fn):
1092 if listignored:
1096 if listignored:
1093 iadd(fn)
1097 iadd(fn)
1094 else:
1098 else:
1095 uadd(fn)
1099 uadd(fn)
1096 continue
1100 continue
1097
1101
1098 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1102 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1099 # written like that for performance reasons. dmap[fn] is not a
1103 # written like that for performance reasons. dmap[fn] is not a
1100 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1104 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1101 # opcode has fast paths when the value to be unpacked is a tuple or
1105 # opcode has fast paths when the value to be unpacked is a tuple or
1102 # a list, but falls back to creating a full-fledged iterator in
1106 # a list, but falls back to creating a full-fledged iterator in
1103 # general. That is much slower than simply accessing and storing the
1107 # general. That is much slower than simply accessing and storing the
1104 # tuple members one by one.
1108 # tuple members one by one.
1105 t = dmap[fn]
1109 t = dmap[fn]
1106 state = t[0]
1110 state = t[0]
1107 mode = t[1]
1111 mode = t[1]
1108 size = t[2]
1112 size = t[2]
1109 time = t[3]
1113 time = t[3]
1110
1114
1111 if not st and state in "nma":
1115 if not st and state in "nma":
1112 dadd(fn)
1116 dadd(fn)
1113 elif state == 'n':
1117 elif state == 'n':
1114 if (size >= 0 and
1118 if (size >= 0 and
1115 ((size != st.st_size and size != st.st_size & _rangemask)
1119 ((size != st.st_size and size != st.st_size & _rangemask)
1116 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1120 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1117 or size == -2 # other parent
1121 or size == -2 # other parent
1118 or fn in copymap):
1122 or fn in copymap):
1119 madd(fn)
1123 madd(fn)
1120 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1124 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1121 ladd(fn)
1125 ladd(fn)
1122 elif st.st_mtime == lastnormaltime:
1126 elif st.st_mtime == lastnormaltime:
1123 # fn may have just been marked as normal and it may have
1127 # fn may have just been marked as normal and it may have
1124 # changed in the same second without changing its size.
1128 # changed in the same second without changing its size.
1125 # This can happen if we quickly do multiple commits.
1129 # This can happen if we quickly do multiple commits.
1126 # Force lookup, so we don't miss such a racy file change.
1130 # Force lookup, so we don't miss such a racy file change.
1127 ladd(fn)
1131 ladd(fn)
1128 elif listclean:
1132 elif listclean:
1129 cadd(fn)
1133 cadd(fn)
1130 elif state == 'm':
1134 elif state == 'm':
1131 madd(fn)
1135 madd(fn)
1132 elif state == 'a':
1136 elif state == 'a':
1133 aadd(fn)
1137 aadd(fn)
1134 elif state == 'r':
1138 elif state == 'r':
1135 radd(fn)
1139 radd(fn)
1136
1140
1137 return (lookup, scmutil.status(modified, added, removed, deleted,
1141 return (lookup, scmutil.status(modified, added, removed, deleted,
1138 unknown, ignored, clean))
1142 unknown, ignored, clean))
1139
1143
def matches(self, match):
    '''
    Return files in the dirstate (in whatever state) filtered by match.
    '''
    tracked = self._map
    if match.always():
        # everything tracked matches; hand back the keys directly
        return tracked.keys()
    pats = match.files()
    if match.isexact():
        # fast path -- filter the (typically much smaller) pattern list
        # against the dirstate instead of scanning the whole dirstate
        return [p for p in pats if p in tracked]
    if match.prefix() and all(p in tracked for p in pats):
        # fast path -- every pattern directly names a tracked file, so
        # the pattern list itself is the answer
        return list(pats)
    # slow path: test every tracked file against the matcher
    return [f for f in tracked if match(f)]
1157
1161
1158 def _actualfilename(self, tr):
1162 def _actualfilename(self, tr):
1159 if tr:
1163 if tr:
1160 return self._pendingfilename
1164 return self._pendingfilename
1161 else:
1165 else:
1162 return self._filename
1166 return self._filename
1163
1167
def _savebackup(self, tr, suffix):
    '''Save current dirstate into backup file with suffix

    tr is the active transaction (or None); suffix is appended to the
    dirstate filename to form the backup file name.
    '''
    filename = self._actualfilename(tr)

    # use '_writedirstate' instead of 'write' to write changes certainly,
    # because the latter omits writing out if transaction is running.
    # output file will be used to create backup of dirstate at this point.
    self._writedirstate(self._opener(filename, "w", atomictemp=True))

    if tr:
        # ensure that subsequent tr.writepending returns True for
        # changes written out above, even if dirstate is never
        # changed after this
        tr.addfilegenerator('dirstate', (self._filename,),
                            self._writedirstate, location='plain')

        # ensure that pending file written above is unlinked at
        # failure, even if tr.writepending isn't invoked until the
        # end of this transaction
        tr.registertmp(filename, location='plain')

    # finally, copy the freshly written (possibly pending) dirstate into
    # the backup file
    self._opener.write(filename + suffix, self._opener.tryread(filename))
1186
1190
1187 def _restorebackup(self, tr, suffix):
1191 def _restorebackup(self, tr, suffix):
1188 '''Restore dirstate by backup file with suffix'''
1192 '''Restore dirstate by backup file with suffix'''
1189 # this "invalidate()" prevents "wlock.release()" from writing
1193 # this "invalidate()" prevents "wlock.release()" from writing
1190 # changes of dirstate out after restoring from backup file
1194 # changes of dirstate out after restoring from backup file
1191 self.invalidate()
1195 self.invalidate()
1192 filename = self._actualfilename(tr)
1196 filename = self._actualfilename(tr)
1193 self._opener.rename(filename + suffix, filename)
1197 self._opener.rename(filename + suffix, filename)
1194
1198
1195 def _clearbackup(self, tr, suffix):
1199 def _clearbackup(self, tr, suffix):
1196 '''Clear backup file with suffix'''
1200 '''Clear backup file with suffix'''
1197 filename = self._actualfilename(tr)
1201 filename = self._actualfilename(tr)
1198 self._opener.unlink(filename + suffix)
1202 self._opener.unlink(filename + suffix)
General Comments 0
You need to be logged in to leave comments. Login now