##// END OF EJS Templates
dirstate: perform transactions with _copymap using single call, where possible...
Michael Bolin -
r33983:5cb0a8fe default
parent child Browse files
Show More
@@ -1,1342 +1,1337 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 parsers = policy.importmod(r'parsers')
31 31
32 32 propertycache = util.propertycache
33 33 filecache = scmutil.filecache
34 34 _rangemask = 0x7fffffff
35 35
36 36 dirstatetuple = parsers.dirstatetuple
37 37
class repocache(filecache):
    """filecache variant whose cached files live under ``.hg/``."""
    def join(self, obj, fname):
        # resolve fname against the repository's .hg directory
        return obj._opener.join(fname)
42 42
class rootcache(filecache):
    """filecache variant whose cached files live in the working directory."""
    def join(self, obj, fname):
        # resolve fname against the repository root
        return obj._join(fname)
47 47
48 48 def _getfsnow(vfs):
49 49 '''Get "now" timestamp on filesystem'''
50 50 tmpfd, tmpname = vfs.mkstemp()
51 51 try:
52 52 return os.fstat(tmpfd).st_mtime
53 53 finally:
54 54 os.close(tmpfd)
55 55 vfs.unlink(tmpname)
56 56
def nonnormalentries(dmap):
    '''Compute the nonnormal dirstate entries from the dmap'''
    try:
        return parsers.nonnormalotherparententries(dmap)
    except AttributeError:
        # pure-Python fallback when the C helper is unavailable
        nonnorm = set(fname for fname, e in dmap.iteritems()
                      if e[0] != 'n' or e[3] == -1)
        otherparent = set(fname for fname, e in dmap.iteritems()
                          if e[0] == 'n' and e[2] == -2)
        return nonnorm, otherparent
70 70
71 71 class dirstate(object):
72 72
73 73 def __init__(self, opener, ui, root, validate, sparsematchfn):
74 74 '''Create a new dirstate object.
75 75
76 76 opener is an open()-like callable that can be used to open the
77 77 dirstate file; root is the root of the directory tracked by
78 78 the dirstate.
79 79 '''
80 80 self._opener = opener
81 81 self._validate = validate
82 82 self._root = root
83 83 self._sparsematchfn = sparsematchfn
84 84 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
85 85 # UNC path pointing to root share (issue4557)
86 86 self._rootdir = pathutil.normasprefix(root)
87 87 self._dirty = False
88 88 self._dirtypl = False
89 89 self._lastnormaltime = 0
90 90 self._ui = ui
91 91 self._filecache = {}
92 92 self._parentwriters = 0
93 93 self._filename = 'dirstate'
94 94 self._pendingfilename = '%s.pending' % self._filename
95 95 self._plchangecallbacks = {}
96 96 self._origpl = None
97 97 self._updatedfiles = set()
98 98
99 99 # for consistent view between _pl() and _read() invocations
100 100 self._pendingmode = None
101 101
102 102 @contextlib.contextmanager
103 103 def parentchange(self):
104 104 '''Context manager for handling dirstate parents.
105 105
106 106 If an exception occurs in the scope of the context manager,
107 107 the incoherent dirstate won't be written when wlock is
108 108 released.
109 109 '''
110 110 self._parentwriters += 1
111 111 yield
112 112 # Typically we want the "undo" step of a context manager in a
113 113 # finally block so it happens even when an exception
114 114 # occurs. In this case, however, we only want to decrement
115 115 # parentwriters if the code in the with statement exits
116 116 # normally, so we don't have a try/finally here on purpose.
117 117 self._parentwriters -= 1
118 118
119 119 def beginparentchange(self):
120 120 '''Marks the beginning of a set of changes that involve changing
121 121 the dirstate parents. If there is an exception during this time,
122 122 the dirstate will not be written when the wlock is released. This
123 123 prevents writing an incoherent dirstate where the parent doesn't
124 124 match the contents.
125 125 '''
126 126 self._ui.deprecwarn('beginparentchange is obsoleted by the '
127 127 'parentchange context manager.', '4.3')
128 128 self._parentwriters += 1
129 129
130 130 def endparentchange(self):
131 131 '''Marks the end of a set of changes that involve changing the
132 132 dirstate parents. Once all parent changes have been marked done,
133 133 the wlock will be free to write the dirstate on release.
134 134 '''
135 135 self._ui.deprecwarn('endparentchange is obsoleted by the '
136 136 'parentchange context manager.', '4.3')
137 137 if self._parentwriters > 0:
138 138 self._parentwriters -= 1
139 139
140 140 def pendingparentchange(self):
141 141 '''Returns true if the dirstate is in the middle of a set of changes
142 142 that modify the dirstate parent.
143 143 '''
144 144 return self._parentwriters > 0
145 145
    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).'''
        # _read() assigns self._map directly into the instance
        # __dict__, so the return below reads the freshly parsed map.
        self._read()
        return self._map
152 152
    @propertycache
    def _copymap(self):
        '''Map of destination filename -> copy source filename.'''
        # _read() assigns self._copymap directly into the instance
        # __dict__; the return below reads the parsed map.
        self._read()
        return self._copymap
157 157
    @propertycache
    def _identity(self):
        '''filestat identity of the dirstate file, set by _read().'''
        self._read()
        return self._identity
162 162
    @propertycache
    def _nonnormalset(self):
        # nonnormalentries computes both sets in one pass, so cache the
        # sibling _otherparentset property as a side effect
        nonnorm, otherparents = nonnormalentries(self._map)
        self._otherparentset = otherparents
        return nonnorm
168 168
    @propertycache
    def _otherparentset(self):
        # nonnormalentries computes both sets in one pass, so cache the
        # sibling _nonnormalset property as a side effect
        nonnorm, otherparents = nonnormalentries(self._map)
        self._nonnormalset = nonnorm
        return otherparents
174 174
175 175 @propertycache
176 176 def _filefoldmap(self):
177 177 try:
178 178 makefilefoldmap = parsers.make_file_foldmap
179 179 except AttributeError:
180 180 pass
181 181 else:
182 182 return makefilefoldmap(self._map, util.normcasespec,
183 183 util.normcasefallback)
184 184
185 185 f = {}
186 186 normcase = util.normcase
187 187 for name, s in self._map.iteritems():
188 188 if s[0] != 'r':
189 189 f[normcase(name)] = name
190 190 f['.'] = '.' # prevents useless util.fspath() invocation
191 191 return f
192 192
193 193 @propertycache
194 194 def _dirfoldmap(self):
195 195 f = {}
196 196 normcase = util.normcase
197 197 for name in self._dirs:
198 198 f[normcase(name)] = name
199 199 return f
200 200
201 201 @property
202 202 def _sparsematcher(self):
203 203 """The matcher for the sparse checkout.
204 204
205 205 The working directory may not include every file from a manifest. The
206 206 matcher obtained by this property will match a path if it is to be
207 207 included in the working directory.
208 208 """
209 209 # TODO there is potential to cache this property. For now, the matcher
210 210 # is resolved on every access. (But the called function does use a
211 211 # cache to keep the lookup fast.)
212 212 return self._sparsematchfn()
213 213
214 214 @repocache('branch')
215 215 def _branch(self):
216 216 try:
217 217 return self._opener.read("branch").strip() or "default"
218 218 except IOError as inst:
219 219 if inst.errno != errno.ENOENT:
220 220 raise
221 221 return "default"
222 222
223 223 @propertycache
224 224 def _pl(self):
225 225 try:
226 226 fp = self._opendirstatefile()
227 227 st = fp.read(40)
228 228 fp.close()
229 229 l = len(st)
230 230 if l == 40:
231 231 return st[:20], st[20:40]
232 232 elif l > 0 and l < 40:
233 233 raise error.Abort(_('working directory state appears damaged!'))
234 234 except IOError as err:
235 235 if err.errno != errno.ENOENT:
236 236 raise
237 237 return [nullid, nullid]
238 238
    @propertycache
    def _dirs(self):
        # directory multiset for the tracked files; the 'r' argument is
        # presumably the state util.dirs should skip — confirm against
        # util.dirs
        return util.dirs(self._map, 'r')
242 242
    def dirs(self):
        '''Return the (cached) directory structure of the dirstate.'''
        return self._dirs
245 245
246 246 @rootcache('.hgignore')
247 247 def _ignore(self):
248 248 files = self._ignorefiles()
249 249 if not files:
250 250 return matchmod.never(self._root, '')
251 251
252 252 pats = ['include:%s' % f for f in files]
253 253 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
254 254
    @propertycache
    def _slash(self):
        # True when paths should be shown with '/' even though the
        # native separator differs (ui.slash config option)
        return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
258 258
    @propertycache
    def _checklink(self):
        # whether the filesystem at the repository root supports symlinks
        return util.checklink(self._root)
262 262
    @propertycache
    def _checkexec(self):
        # whether the filesystem at the repository root supports the
        # executable bit
        return util.checkexec(self._root)
266 266
    @propertycache
    def _checkcase(self):
        # True when the filesystem is case-insensitive, probed via the
        # .hg directory
        return not util.fscasesensitive(self._join('.hg'))
270 270
    def _join(self, f):
        '''Return the absolute path for repo-relative path f.'''
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
275 275
276 276 def flagfunc(self, buildfallback):
277 277 if self._checklink and self._checkexec:
278 278 def f(x):
279 279 try:
280 280 st = os.lstat(self._join(x))
281 281 if util.statislink(st):
282 282 return 'l'
283 283 if util.statisexec(st):
284 284 return 'x'
285 285 except OSError:
286 286 pass
287 287 return ''
288 288 return f
289 289
290 290 fallback = buildfallback()
291 291 if self._checklink:
292 292 def f(x):
293 293 if os.path.islink(self._join(x)):
294 294 return 'l'
295 295 if 'x' in fallback(x):
296 296 return 'x'
297 297 return ''
298 298 return f
299 299 if self._checkexec:
300 300 def f(x):
301 301 if 'l' in fallback(x):
302 302 return 'l'
303 303 if util.isexec(self._join(x)):
304 304 return 'x'
305 305 return ''
306 306 return f
307 307 else:
308 308 return fallback
309 309
310 310 @propertycache
311 311 def _cwd(self):
312 312 # internal config: ui.forcecwd
313 313 forcecwd = self._ui.config('ui', 'forcecwd')
314 314 if forcecwd:
315 315 return forcecwd
316 316 return pycompat.getcwd()
317 317
318 318 def getcwd(self):
319 319 '''Return the path from which a canonical path is calculated.
320 320
321 321 This path should be used to resolve file patterns or to convert
322 322 canonical paths back to file paths for display. It shouldn't be
323 323 used to get real file paths. Use vfs functions instead.
324 324 '''
325 325 cwd = self._cwd
326 326 if cwd == self._root:
327 327 return ''
328 328 # self._root ends with a path separator if self._root is '/' or 'C:\'
329 329 rootsep = self._root
330 330 if not util.endswithsep(rootsep):
331 331 rootsep += pycompat.ossep
332 332 if cwd.startswith(rootsep):
333 333 return cwd[len(rootsep):]
334 334 else:
335 335 # we're outside the repo. return an absolute path.
336 336 return cwd
337 337
338 338 def pathto(self, f, cwd=None):
339 339 if cwd is None:
340 340 cwd = self.getcwd()
341 341 path = util.pathto(self._root, cwd, f)
342 342 if self._slash:
343 343 return util.pconvert(path)
344 344 return path
345 345
346 346 def __getitem__(self, key):
347 347 '''Return the current state of key (a filename) in the dirstate.
348 348
349 349 States are:
350 350 n normal
351 351 m needs merging
352 352 r marked for removal
353 353 a marked for addition
354 354 ? not tracked
355 355 '''
356 356 return self._map.get(key, ("?",))[0]
357 357
    def __contains__(self, key):
        '''True if key (a filename) is tracked by the dirstate.'''
        return key in self._map
360 360
    def __iter__(self):
        '''Iterate over tracked filenames in sorted order.'''
        return iter(sorted(self._map))
363 363
    def items(self):
        '''Iterate over (filename, dirstate entry) pairs.'''
        return self._map.iteritems()

    # Python 2 style alias
    iteritems = items
368 368
    def parents(self):
        '''Return both working directory parents, validated.'''
        return [self._validate(p) for p in self._pl]
371 371
    def p1(self):
        '''Return the first working directory parent, validated.'''
        return self._validate(self._pl[0])
374 374
    def p2(self):
        '''Return the second working directory parent, validated.'''
        return self._validate(self._pl[1])
377 377
    def branch(self):
        '''Return the current branch name in local encoding.'''
        return encoding.tolocal(self._branch)
380 380
381 381 def setparents(self, p1, p2=nullid):
382 382 """Set dirstate parents to p1 and p2.
383 383
384 384 When moving from two parents to one, 'm' merged entries a
385 385 adjusted to normal and previous copy records discarded and
386 386 returned by the call.
387 387
388 388 See localrepo.setparents()
389 389 """
390 390 if self._parentwriters == 0:
391 391 raise ValueError("cannot set dirstate parent without "
392 392 "calling dirstate.beginparentchange")
393 393
394 394 self._dirty = self._dirtypl = True
395 395 oldp2 = self._pl[1]
396 396 if self._origpl is None:
397 397 self._origpl = self._pl
398 398 self._pl = p1, p2
399 399 copies = {}
400 400 if oldp2 != nullid and p2 == nullid:
401 401 candidatefiles = self._nonnormalset.union(self._otherparentset)
402 402 for f in candidatefiles:
403 403 s = self._map.get(f)
404 404 if s is None:
405 405 continue
406 406
407 407 # Discard 'm' markers when moving away from a merge state
408 408 if s[0] == 'm':
409 if f in self._copymap:
410 copies[f] = self._copymap[f]
409 source = self._copymap.get(f)
410 if source:
411 copies[f] = source
411 412 self.normallookup(f)
412 413 # Also fix up otherparent markers
413 414 elif s[0] == 'n' and s[2] == -2:
414 if f in self._copymap:
415 copies[f] = self._copymap[f]
415 source = self._copymap.get(f)
416 if source:
417 copies[f] = source
416 418 self.add(f)
417 419 return copies
418 420
    def setbranch(self, branch):
        '''Set the current branch, persisting it to .hg/branch.'''
        self._branch = encoding.fromlocal(branch)
        f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + '\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache['_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            f.discard()
            raise
434 436
    def _opendirstatefile(self):
        '''Open the dirstate file (or its pending variant).

        Aborts if a previous open in this object used a different
        pending mode, to keep _pl() and _read() views consistent.
        '''
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            fp.close()
            raise error.Abort(_('working directory state may be '
                                'changed parallelly'))
        self._pendingmode = mode
        return fp
443 445
    def _read(self):
        '''Parse the dirstate file into self._map and self._copymap.

        A missing or empty dirstate leaves both maps empty. The parents
        read from the file are stored into self._pl unless a parent
        change is already pending (self._dirtypl).
        '''
        self._map = {}
        self._copymap = {}
        # ignore HG_PENDING because identity is used only for writing
        self._identity = util.filestat.frompath(
            self._opener.join(self._filename))
        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        if util.safehasattr(parsers, 'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. From a linear regression on a set of real-world repos,
            # all over 10,000 files, the size of a dirstate entry is 85
            # bytes. The cost of resizing is significantly higher than the cost
            # of filling in a larger presized dict, so subtract 20% from the
            # size.
            #
            # This heuristic is imperfect in many ways, so in a future dirstate
            # format update it makes sense to just record the number of entries
            # on write.
            self._map = parsers.dict_new_presized(len(st) / 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self._copymap, st)
        if not self._dirtypl:
            self._pl = p
492 494
493 495 def invalidate(self):
494 496 '''Causes the next access to reread the dirstate.
495 497
496 498 This is different from localrepo.invalidatedirstate() because it always
497 499 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
498 500 check whether the dirstate has changed before rereading it.'''
499 501
500 502 for a in ("_map", "_copymap", "_identity",
501 503 "_filefoldmap", "_dirfoldmap", "_branch",
502 504 "_pl", "_dirs", "_ignore", "_nonnormalset",
503 505 "_otherparentset"):
504 506 if a in self.__dict__:
505 507 delattr(self, a)
506 508 self._lastnormaltime = 0
507 509 self._dirty = False
508 510 self._updatedfiles.clear()
509 511 self._parentwriters = 0
510 512 self._origpl = None
511 513
512 514 def copy(self, source, dest):
513 515 """Mark dest as a copy of source. Unmark dest if source is None."""
514 516 if source == dest:
515 517 return
516 518 self._dirty = True
517 519 if source is not None:
518 520 self._copymap[dest] = source
519 521 self._updatedfiles.add(source)
520 522 self._updatedfiles.add(dest)
521 elif dest in self._copymap:
522 del self._copymap[dest]
523 elif self._copymap.pop(dest, None):
523 524 self._updatedfiles.add(dest)
524 525
    def copied(self, file):
        '''Return the copy source of file, or None if not copied.'''
        return self._copymap.get(file, None)
527 528
    def copies(self):
        '''Return the full destination -> source copy map.'''
        return self._copymap
530 531
531 532 def _droppath(self, f):
532 533 if self[f] not in "?r" and "_dirs" in self.__dict__:
533 534 self._dirs.delpath(f)
534 535
535 536 if "_filefoldmap" in self.__dict__:
536 537 normed = util.normcase(f)
537 538 if normed in self._filefoldmap:
538 539 del self._filefoldmap[normed]
539 540
540 541 self._updatedfiles.add(f)
541 542
    def _addpath(self, f, state, mode, size, mtime):
        '''Insert or update entry f in the dirstate map.

        Validates the filename and checks for file/directory clashes
        when f is newly added, then updates the map and the
        nonnormal/otherparent caches.
        '''
        oldstate = self[f]
        if state == 'a' or oldstate == 'r':
            scmutil.checkfilename(f)
            if f in self._dirs:
                raise error.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in util.finddirs(f):
                if d in self._dirs:
                    break
                if d in self._map and self[d] != 'r':
                    raise error.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            self._dirs.addpath(f)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        if state != 'n' or mtime == -1:
            self._nonnormalset.add(f)
        if size == -2:
            self._otherparentset.add(f)
564 565
565 566 def normal(self, f):
566 567 '''Mark a file normal and clean.'''
567 568 s = os.lstat(self._join(f))
568 569 mtime = s.st_mtime
569 570 self._addpath(f, 'n', s.st_mode,
570 571 s.st_size & _rangemask, mtime & _rangemask)
571 if f in self._copymap:
572 del self._copymap[f]
572 self._copymap.pop(f, None)
573 573 if f in self._nonnormalset:
574 574 self._nonnormalset.remove(f)
575 575 if mtime > self._lastnormaltime:
576 576 # Remember the most recent modification timeslot for status(),
577 577 # to make sure we won't miss future size-preserving file content
578 578 # modifications that happen within the same timeslot.
579 579 self._lastnormaltime = mtime
580 580
581 581 def normallookup(self, f):
582 582 '''Mark a file normal, but possibly dirty.'''
583 583 if self._pl[1] != nullid and f in self._map:
584 584 # if there is a merge going on and the file was either
585 585 # in state 'm' (-1) or coming from other parent (-2) before
586 586 # being removed, restore that state.
587 587 entry = self._map[f]
588 588 if entry[0] == 'r' and entry[2] in (-1, -2):
589 589 source = self._copymap.get(f)
590 590 if entry[2] == -1:
591 591 self.merge(f)
592 592 elif entry[2] == -2:
593 593 self.otherparent(f)
594 594 if source:
595 595 self.copy(source, f)
596 596 return
597 597 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
598 598 return
599 599 self._addpath(f, 'n', 0, -1, -1)
600 if f in self._copymap:
601 del self._copymap[f]
600 self._copymap.pop(f, None)
602 601 if f in self._nonnormalset:
603 602 self._nonnormalset.remove(f)
604 603
605 604 def otherparent(self, f):
606 605 '''Mark as coming from the other parent, always dirty.'''
607 606 if self._pl[1] == nullid:
608 607 raise error.Abort(_("setting %r to other parent "
609 608 "only allowed in merges") % f)
610 609 if f in self and self[f] == 'n':
611 610 # merge-like
612 611 self._addpath(f, 'm', 0, -2, -1)
613 612 else:
614 613 # add-like
615 614 self._addpath(f, 'n', 0, -2, -1)
616
617 if f in self._copymap:
618 del self._copymap[f]
615 self._copymap.pop(f, None)
619 616
620 617 def add(self, f):
621 618 '''Mark a file added.'''
622 619 self._addpath(f, 'a', 0, -1, -1)
623 if f in self._copymap:
624 del self._copymap[f]
620 self._copymap.pop(f, None)
625 621
626 622 def remove(self, f):
627 623 '''Mark a file removed.'''
628 624 self._dirty = True
629 625 self._droppath(f)
630 626 size = 0
631 627 if self._pl[1] != nullid and f in self._map:
632 628 # backup the previous state
633 629 entry = self._map[f]
634 630 if entry[0] == 'm': # merge
635 631 size = -1
636 632 elif entry[0] == 'n' and entry[2] == -2: # other parent
637 633 size = -2
638 634 self._otherparentset.add(f)
639 635 self._map[f] = dirstatetuple('r', 0, size, 0)
640 636 self._nonnormalset.add(f)
641 if size == 0 and f in self._copymap:
642 del self._copymap[f]
637 if size == 0:
638 self._copymap.pop(f, None)
643 639
644 640 def merge(self, f):
645 641 '''Mark a file merged.'''
646 642 if self._pl[1] == nullid:
647 643 return self.normallookup(f)
648 644 return self.otherparent(f)
649 645
650 646 def drop(self, f):
651 647 '''Drop a file from the dirstate'''
652 648 if f in self._map:
653 649 self._dirty = True
654 650 self._droppath(f)
655 651 del self._map[f]
656 652 if f in self._nonnormalset:
657 653 self._nonnormalset.remove(f)
658 if f in self._copymap:
659 del self._copymap[f]
654 self._copymap.pop(f, None)
660 655
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        '''Resolve the on-disk case of path, caching the result in storemap.

        Leading directory components are normalized recursively; missing
        paths are returned unchanged when ignoremissing is set.
        '''
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and '/' in path:
                d, f = path.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + "/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if '/' in normed:
                d, f = normed.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + "/" + d
                folded = d + "/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded
686 681
687 682 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
688 683 normed = util.normcase(path)
689 684 folded = self._filefoldmap.get(normed, None)
690 685 if folded is None:
691 686 if isknown:
692 687 folded = path
693 688 else:
694 689 folded = self._discoverpath(path, normed, ignoremissing, exists,
695 690 self._filefoldmap)
696 691 return folded
697 692
698 693 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
699 694 normed = util.normcase(path)
700 695 folded = self._filefoldmap.get(normed, None)
701 696 if folded is None:
702 697 folded = self._dirfoldmap.get(normed, None)
703 698 if folded is None:
704 699 if isknown:
705 700 folded = path
706 701 else:
707 702 # store discovered result in dirfoldmap so that future
708 703 # normalizefile calls don't start matching directories
709 704 folded = self._discoverpath(path, normed, ignoremissing, exists,
710 705 self._dirfoldmap)
711 706 return folded
712 707
713 708 def normalize(self, path, isknown=False, ignoremissing=False):
714 709 '''
715 710 normalize the case of a pathname when on a casefolding filesystem
716 711
717 712 isknown specifies whether the filename came from walking the
718 713 disk, to avoid extra filesystem access.
719 714
720 715 If ignoremissing is True, missing path are returned
721 716 unchanged. Otherwise, we try harder to normalize possibly
722 717 existing path components.
723 718
724 719 The normalized case is determined based on the following precedence:
725 720
726 721 - version of name already stored in the dirstate
727 722 - version of name stored on disk
728 723 - version provided via command arguments
729 724 '''
730 725
731 726 if self._checkcase:
732 727 return self._normalize(path, isknown, ignoremissing)
733 728 return path
734 729
735 730 def clear(self):
736 731 self._map = {}
737 732 self._nonnormalset = set()
738 733 self._otherparentset = set()
739 734 if "_dirs" in self.__dict__:
740 735 delattr(self, "_dirs")
741 736 self._copymap = {}
742 737 self._pl = [nullid, nullid]
743 738 self._lastnormaltime = 0
744 739 self._updatedfiles.clear()
745 740 self._dirty = True
746 741
747 742 def rebuild(self, parent, allfiles, changedfiles=None):
748 743 if changedfiles is None:
749 744 # Rebuild entire dirstate
750 745 changedfiles = allfiles
751 746 lastnormaltime = self._lastnormaltime
752 747 self.clear()
753 748 self._lastnormaltime = lastnormaltime
754 749
755 750 if self._origpl is None:
756 751 self._origpl = self._pl
757 752 self._pl = (parent, nullid)
758 753 for f in changedfiles:
759 754 if f in allfiles:
760 755 self.normallookup(f)
761 756 else:
762 757 self.drop(f)
763 758
764 759 self._dirty = True
765 760
    def identity(self):
        '''Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        '''
        # set as a side effect of _read()
        return self._identity
773 768
    def write(self, tr):
        '''Write the dirstate out, or schedule the write on transaction tr.

        No-op unless the dirstate is dirty. With a transaction, the
        write is delayed via a file generator and ambiguous timestamps
        are dropped up front.
        '''
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            dmap = self._map
            for f in self._updatedfiles:
                e = dmap.get(f)
                if e is not None and e[0] == 'n' and e[3] == now:
                    dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
                    self._nonnormalset.add(f)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')
            return

        st = self._opener(filename, "w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
806 801
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        # callbacks fire from _writedirstate, sorted by category
        self._plchangecallbacks[category] = callback
817 812
    def _writedirstate(self, st):
        '''Serialize the dirstate to the open file-like object st.

        Fires parent-change callbacks, optionally delays to dodge
        timestamp ambiguity (debug.dirstate.delaywrite), then packs and
        writes the map and resets the dirty flags.
        '''
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(self._plchangecallbacks.iteritems()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st).st_mtime & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in self._map.iteritems():
                if e[0] == 'n' and e[3] == now:
                    import time # to avoid useless import
                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end # trust our estimate that the end is near now
                    break

        st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
        self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
        st.close()
        self._lastnormaltime = 0
        self._dirty = self._dirtypl = False
850 845
851 846 def _dirignore(self, f):
852 847 if f == '.':
853 848 return False
854 849 if self._ignore(f):
855 850 return True
856 851 for p in util.finddirs(f):
857 852 if self._ignore(p):
858 853 return True
859 854 return False
860 855
861 856 def _ignorefiles(self):
862 857 files = []
863 858 if os.path.exists(self._join('.hgignore')):
864 859 files.append(self._join('.hgignore'))
865 860 for name, path in self._ui.configitems("ui"):
866 861 if name == 'ignore' or name.startswith('ignore.'):
867 862 # we need to use os.path.join here rather than self._join
868 863 # because path is arbitrary and user-specified
869 864 files.append(os.path.join(self._rootdir, util.expandpath(path)))
870 865 return files
871 866
    def _ignorefileandline(self, f):
        '''Return (ignorefile, lineno, line) of the rule ignoring f.

        Walks all ignore files breadth-first, following subinclude
        patterns; returns (None, -1, "") if no rule matches.
        '''
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(i, self._ui.warn,
                                                sourceinfo=True)
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, 'glob')
                if kind == "subinclude":
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(self._root, '', [], [pattern],
                                   warn=self._ui.warn)
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, "")
891 886
    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            # Build a human-readable description for a path that is neither
            # a regular file, a symlink nor a directory.
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        # Hoist frequently-used callables and constants into locals so the
        # loop below uses fast LOAD_FAST lookups.
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        # Case normalization is only needed for non-exact matches on
        # case-insensitive filesystems (exact matches are handled by the
        # pruning pass at the bottom of this method).
        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # Both lists are sorted, so files living under a subrepo can be
        # dropped with a single linear merge-style scan.
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or '.' in files:
            files = ['.']
        results = dict.fromkeys(subrepos)
        results['.hg'] = None

        # Lazily-computed set of all directories implied by the dirstate
        # contents; only built when a missing path has to be classified.
        alldirs = None
        for ff in files:
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['.']
            if normalize and ff != '.':
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if alldirs is None:
                        alldirs = util.dirs(dmap)
                    if nf in alldirs:
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, inst.strerror)

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in results.iteritems():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.iteritems():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(path, norm, True, None,
                                                    self._dirfoldmap)
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1021 1016
1022 1017 def walk(self, match, subrepos, unknown, ignored, full=True):
1023 1018 '''
1024 1019 Walk recursively through the directory tree, finding all files
1025 1020 matched by match.
1026 1021
1027 1022 If full is False, maybe skip some known-clean files.
1028 1023
1029 1024 Return a dict mapping filename to stat-like object (either
1030 1025 mercurial.osutil.stat instance or return value of os.stat()).
1031 1026
1032 1027 '''
1033 1028 # full is a flag that extensions that hook into walk can use -- this
1034 1029 # implementation doesn't use it at all. This satisfies the contract
1035 1030 # because we only guarantee a "maybe".
1036 1031
1037 1032 if ignored:
1038 1033 ignore = util.never
1039 1034 dirignore = util.never
1040 1035 elif unknown:
1041 1036 ignore = self._ignore
1042 1037 dirignore = self._dirignore
1043 1038 else:
1044 1039 # if not unknown and not ignored, drop dir recursion and step 2
1045 1040 ignore = util.always
1046 1041 dirignore = util.always
1047 1042
1048 1043 matchfn = match.matchfn
1049 1044 matchalways = match.always()
1050 1045 matchtdir = match.traversedir
1051 1046 dmap = self._map
1052 1047 listdir = util.listdir
1053 1048 lstat = os.lstat
1054 1049 dirkind = stat.S_IFDIR
1055 1050 regkind = stat.S_IFREG
1056 1051 lnkkind = stat.S_IFLNK
1057 1052 join = self._join
1058 1053
1059 1054 exact = skipstep3 = False
1060 1055 if match.isexact(): # match.exact
1061 1056 exact = True
1062 1057 dirignore = util.always # skip step 2
1063 1058 elif match.prefix(): # match.match, no patterns
1064 1059 skipstep3 = True
1065 1060
1066 1061 if not exact and self._checkcase:
1067 1062 normalize = self._normalize
1068 1063 normalizefile = self._normalizefile
1069 1064 skipstep3 = False
1070 1065 else:
1071 1066 normalize = self._normalize
1072 1067 normalizefile = None
1073 1068
1074 1069 # step 1: find all explicit files
1075 1070 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1076 1071
1077 1072 skipstep3 = skipstep3 and not (work or dirsnotfound)
1078 1073 work = [d for d in work if not dirignore(d[0])]
1079 1074
1080 1075 # step 2: visit subdirectories
1081 1076 def traverse(work, alreadynormed):
1082 1077 wadd = work.append
1083 1078 while work:
1084 1079 nd = work.pop()
1085 1080 if not match.visitdir(nd):
1086 1081 continue
1087 1082 skip = None
1088 1083 if nd == '.':
1089 1084 nd = ''
1090 1085 else:
1091 1086 skip = '.hg'
1092 1087 try:
1093 1088 entries = listdir(join(nd), stat=True, skip=skip)
1094 1089 except OSError as inst:
1095 1090 if inst.errno in (errno.EACCES, errno.ENOENT):
1096 1091 match.bad(self.pathto(nd), inst.strerror)
1097 1092 continue
1098 1093 raise
1099 1094 for f, kind, st in entries:
1100 1095 if normalizefile:
1101 1096 # even though f might be a directory, we're only
1102 1097 # interested in comparing it to files currently in the
1103 1098 # dmap -- therefore normalizefile is enough
1104 1099 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1105 1100 True)
1106 1101 else:
1107 1102 nf = nd and (nd + "/" + f) or f
1108 1103 if nf not in results:
1109 1104 if kind == dirkind:
1110 1105 if not ignore(nf):
1111 1106 if matchtdir:
1112 1107 matchtdir(nf)
1113 1108 wadd(nf)
1114 1109 if nf in dmap and (matchalways or matchfn(nf)):
1115 1110 results[nf] = None
1116 1111 elif kind == regkind or kind == lnkkind:
1117 1112 if nf in dmap:
1118 1113 if matchalways or matchfn(nf):
1119 1114 results[nf] = st
1120 1115 elif ((matchalways or matchfn(nf))
1121 1116 and not ignore(nf)):
1122 1117 # unknown file -- normalize if necessary
1123 1118 if not alreadynormed:
1124 1119 nf = normalize(nf, False, True)
1125 1120 results[nf] = st
1126 1121 elif nf in dmap and (matchalways or matchfn(nf)):
1127 1122 results[nf] = None
1128 1123
1129 1124 for nd, d in work:
1130 1125 # alreadynormed means that processwork doesn't have to do any
1131 1126 # expensive directory normalization
1132 1127 alreadynormed = not normalize or nd == d
1133 1128 traverse([d], alreadynormed)
1134 1129
1135 1130 for s in subrepos:
1136 1131 del results[s]
1137 1132 del results['.hg']
1138 1133
1139 1134 # step 3: visit remaining files from dmap
1140 1135 if not skipstep3 and not exact:
1141 1136 # If a dmap file is not in results yet, it was either
1142 1137 # a) not matching matchfn b) ignored, c) missing, or d) under a
1143 1138 # symlink directory.
1144 1139 if not results and matchalways:
1145 1140 visit = [f for f in dmap]
1146 1141 else:
1147 1142 visit = [f for f in dmap if f not in results and matchfn(f)]
1148 1143 visit.sort()
1149 1144
1150 1145 if unknown:
1151 1146 # unknown == True means we walked all dirs under the roots
1152 1147 # that wasn't ignored, and everything that matched was stat'ed
1153 1148 # and is already in results.
1154 1149 # The rest must thus be ignored or under a symlink.
1155 1150 audit_path = pathutil.pathauditor(self._root, cached=True)
1156 1151
1157 1152 for nf in iter(visit):
1158 1153 # If a stat for the same file was already added with a
1159 1154 # different case, don't add one for this, since that would
1160 1155 # make it appear as if the file exists under both names
1161 1156 # on disk.
1162 1157 if (normalizefile and
1163 1158 normalizefile(nf, True, True) in results):
1164 1159 results[nf] = None
1165 1160 # Report ignored items in the dmap as long as they are not
1166 1161 # under a symlink directory.
1167 1162 elif audit_path.check(nf):
1168 1163 try:
1169 1164 results[nf] = lstat(join(nf))
1170 1165 # file was just ignored, no links, and exists
1171 1166 except OSError:
1172 1167 # file doesn't exist
1173 1168 results[nf] = None
1174 1169 else:
1175 1170 # It's either missing or under a symlink directory
1176 1171 # which we in this case report as missing
1177 1172 results[nf] = None
1178 1173 else:
1179 1174 # We may not have walked the full directory tree above,
1180 1175 # so stat and check everything we missed.
1181 1176 iv = iter(visit)
1182 1177 for st in util.statfiles([join(i) for i in visit]):
1183 1178 results[next(iv)] = st
1184 1179 return results
1185 1180
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        # Bind bucket-append methods (and other hot attributes) to locals:
        # this loop runs once per file in the working directory.
        dmap = self._map
        ladd = lookup.append # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                # untracked path: classify as ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            # tracked but gone from disk -> deleted
            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                # 'n' (normal): compare recorded size/mode/mtime against the
                # on-disk stat to decide modified / unsure / clean.
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                # merged: always report as modified
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))
1277 1272
1278 1273 def matches(self, match):
1279 1274 '''
1280 1275 return files in the dirstate (in whatever state) filtered by match
1281 1276 '''
1282 1277 dmap = self._map
1283 1278 if match.always():
1284 1279 return dmap.keys()
1285 1280 files = match.files()
1286 1281 if match.isexact():
1287 1282 # fast path -- filter the other way around, since typically files is
1288 1283 # much smaller than dmap
1289 1284 return [f for f in files if f in dmap]
1290 1285 if match.prefix() and all(fn in dmap for fn in files):
1291 1286 # fast path -- all the values are known to be files, so just return
1292 1287 # that
1293 1288 return list(files)
1294 1289 return [f for f in dmap if match(f)]
1295 1290
1296 1291 def _actualfilename(self, tr):
1297 1292 if tr:
1298 1293 return self._pendingfilename
1299 1294 else:
1300 1295 return self._filename
1301 1296
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        # While a transaction is running the live dirstate is the pending
        # file, not the regular one; back up whichever is current.
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(self._opener(filename, "w", atomictemp=True,
                                             checkambig=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        # remove any stale backup before linking the fresh one in place
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(self._opener.join(filename),
                      self._opener.join(backupname), hardlink=True)
1331 1326
1332 1327 def restorebackup(self, tr, backupname):
1333 1328 '''Restore dirstate by backup file'''
1334 1329 # this "invalidate()" prevents "wlock.release()" from writing
1335 1330 # changes of dirstate out after restoring from backup file
1336 1331 self.invalidate()
1337 1332 filename = self._actualfilename(tr)
1338 1333 self._opener.rename(backupname, filename, checkambig=True)
1339 1334
    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        # 'tr' is unused but kept for interface symmetry with savebackup()
        # and restorebackup().
        self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now