dirstate: rename the filegenerator used for writing...
marmoute
r49532:111f5a0c default
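In short: this changeset renames the transaction file-generator id used when writing the dirstate from b'dirstate' to b'dirstate-1-main', at both registration sites in dirstate.py (write() and savebackup()) and in the postfinalizegenerators set in transaction.py. A minimal sketch of the registration after the rename, as it appears in the diff below (tr is a transaction, self a dirstate):

    tr.addfilegenerator(
        b'dirstate-1-main',  # renamed genid; was b'dirstate'
        (self._filename,),   # files the generator produces
        lambda f: self._writedirstate(tr, f),
        location=b'plain',
    )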
@@ -1,1430 +1,1430 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .dirstateutils import (
35 35 timestamp,
36 36 )
37 37
38 38 from .interfaces import (
39 39 dirstate as intdirstate,
40 40 util as interfaceutil,
41 41 )
42 42
43 43 parsers = policy.importmod('parsers')
44 44 rustmod = policy.importrust('dirstate')
45 45
46 46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
47 47
48 48 propertycache = util.propertycache
49 49 filecache = scmutil.filecache
50 50 _rangemask = dirstatemap.rangemask
51 51
52 52 DirstateItem = dirstatemap.DirstateItem
53 53
54 54
55 55 class repocache(filecache):
56 56 """filecache for files in .hg/"""
57 57
58 58 def join(self, obj, fname):
59 59 return obj._opener.join(fname)
60 60
61 61
62 62 class rootcache(filecache):
63 63 """filecache for files in the repository root"""
64 64
65 65 def join(self, obj, fname):
66 66 return obj._join(fname)
67 67
68 68
69 69 def requires_parents_change(func):
70 70 def wrap(self, *args, **kwargs):
71 71 if not self.pendingparentchange():
72 72 msg = 'calling `%s` outside of a parentchange context'
73 73 msg %= func.__name__
74 74 raise error.ProgrammingError(msg)
75 75 return func(self, *args, **kwargs)
76 76
77 77 return wrap
78 78
79 79
80 80 def requires_no_parents_change(func):
81 81 def wrap(self, *args, **kwargs):
82 82 if self.pendingparentchange():
83 83 msg = 'calling `%s` inside of a parentchange context'
84 84 msg %= func.__name__
85 85 raise error.ProgrammingError(msg)
86 86 return func(self, *args, **kwargs)
87 87
88 88 return wrap
89 89
90 90
91 91 @interfaceutil.implementer(intdirstate.idirstate)
92 92 class dirstate(object):
93 93 def __init__(
94 94 self,
95 95 opener,
96 96 ui,
97 97 root,
98 98 validate,
99 99 sparsematchfn,
100 100 nodeconstants,
101 101 use_dirstate_v2,
102 102 ):
103 103 """Create a new dirstate object.
104 104
105 105 opener is an open()-like callable that can be used to open the
106 106 dirstate file; root is the root of the directory tracked by
107 107 the dirstate.
108 108 """
109 109 self._use_dirstate_v2 = use_dirstate_v2
110 110 self._nodeconstants = nodeconstants
111 111 self._opener = opener
112 112 self._validate = validate
113 113 self._root = root
114 114 self._sparsematchfn = sparsematchfn
115 115 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
116 116 # a UNC path pointing to a root share (issue4557)
117 117 self._rootdir = pathutil.normasprefix(root)
118 118 self._dirty = False
119 119 self._ui = ui
120 120 self._filecache = {}
121 121 self._parentwriters = 0
122 122 self._filename = b'dirstate'
123 123 self._pendingfilename = b'%s.pending' % self._filename
124 124 self._plchangecallbacks = {}
125 125 self._origpl = None
126 126 self._mapcls = dirstatemap.dirstatemap
127 127 # Access and cache cwd early, so we don't access it for the first time
128 128 # after a working-copy update caused it to not exist (accessing it then
129 129 # raises an exception).
130 130 self._cwd
131 131
132 132 def prefetch_parents(self):
133 133 """make sure the parents are loaded
134 134
135 135 Used to avoid a race condition.
136 136 """
137 137 self._pl
138 138
139 139 @contextlib.contextmanager
140 140 def parentchange(self):
141 141 """Context manager for handling dirstate parents.
142 142
143 143 If an exception occurs in the scope of the context manager,
144 144 the incoherent dirstate won't be written when wlock is
145 145 released.
146 146 """
147 147 self._parentwriters += 1
148 148 yield
149 149 # Typically we want the "undo" step of a context manager in a
150 150 # finally block so it happens even when an exception
151 151 # occurs. In this case, however, we only want to decrement
152 152 # parentwriters if the code in the with statement exits
153 153 # normally, so we don't have a try/finally here on purpose.
154 154 self._parentwriters -= 1
155 155
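setparents() below refuses to run outside this context, so callers pair the two; a minimal sketch, assuming a dirstate instance `ds` and parent nodes `p1` and `p2`:

    with ds.parentchange():
        # if this block raises, _parentwriters stays incremented and the
        # incoherent dirstate is not written out when the wlock is released
        ds.setparents(p1, p2)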
156 156 def pendingparentchange(self):
157 157 """Returns true if the dirstate is in the middle of a set of changes
158 158 that modify the dirstate parent.
159 159 """
160 160 return self._parentwriters > 0
161 161
162 162 @propertycache
163 163 def _map(self):
164 164 """Return the dirstate contents (see documentation for dirstatemap)."""
165 165 self._map = self._mapcls(
166 166 self._ui,
167 167 self._opener,
168 168 self._root,
169 169 self._nodeconstants,
170 170 self._use_dirstate_v2,
171 171 )
172 172 return self._map
173 173
174 174 @property
175 175 def _sparsematcher(self):
176 176 """The matcher for the sparse checkout.
177 177
178 178 The working directory may not include every file from a manifest. The
179 179 matcher obtained by this property will match a path if it is to be
180 180 included in the working directory.
181 181 """
182 182 # TODO there is potential to cache this property. For now, the matcher
183 183 # is resolved on every access. (But the called function does use a
184 184 # cache to keep the lookup fast.)
185 185 return self._sparsematchfn()
186 186
187 187 @repocache(b'branch')
188 188 def _branch(self):
189 189 try:
190 190 return self._opener.read(b"branch").strip() or b"default"
191 191 except IOError as inst:
192 192 if inst.errno != errno.ENOENT:
193 193 raise
194 194 return b"default"
195 195
196 196 @property
197 197 def _pl(self):
198 198 return self._map.parents()
199 199
200 200 def hasdir(self, d):
201 201 return self._map.hastrackeddir(d)
202 202
203 203 @rootcache(b'.hgignore')
204 204 def _ignore(self):
205 205 files = self._ignorefiles()
206 206 if not files:
207 207 return matchmod.never()
208 208
209 209 pats = [b'include:%s' % f for f in files]
210 210 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
211 211
212 212 @propertycache
213 213 def _slash(self):
214 214 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
215 215
216 216 @propertycache
217 217 def _checklink(self):
218 218 return util.checklink(self._root)
219 219
220 220 @propertycache
221 221 def _checkexec(self):
222 222 return bool(util.checkexec(self._root))
223 223
224 224 @propertycache
225 225 def _checkcase(self):
226 226 return not util.fscasesensitive(self._join(b'.hg'))
227 227
228 228 def _join(self, f):
229 229 # much faster than os.path.join()
230 230 # it's safe because f is always a relative path
231 231 return self._rootdir + f
232 232
233 233 def flagfunc(self, buildfallback):
234 234 """build a callable that returns flags associated with a filename
235 235
236 236 The information is extracted from three possible layers:
237 237 1. the file system if it supports the information
238 238 2. the "fallback" information stored in the dirstate if any
239 239 3. a more expensive mechanism inferring the flags from the parents.
240 240 """
241 241
242 242 # small hack to cache the result of buildfallback()
243 243 fallback_func = []
244 244
245 245 def get_flags(x):
246 246 entry = None
247 247 fallback_value = None
248 248 try:
249 249 st = os.lstat(self._join(x))
250 250 except OSError:
251 251 return b''
252 252
253 253 if self._checklink:
254 254 if util.statislink(st):
255 255 return b'l'
256 256 else:
257 257 entry = self.get_entry(x)
258 258 if entry.has_fallback_symlink:
259 259 if entry.fallback_symlink:
260 260 return b'l'
261 261 else:
262 262 if not fallback_func:
263 263 fallback_func.append(buildfallback())
264 264 fallback_value = fallback_func[0](x)
265 265 if b'l' in fallback_value:
266 266 return b'l'
267 267
268 268 if self._checkexec:
269 269 if util.statisexec(st):
270 270 return b'x'
271 271 else:
272 272 if entry is None:
273 273 entry = self.get_entry(x)
274 274 if entry.has_fallback_exec:
275 275 if entry.fallback_exec:
276 276 return b'x'
277 277 else:
278 278 if fallback_value is None:
279 279 if not fallback_func:
280 280 fallback_func.append(buildfallback())
281 281 fallback_value = fallback_func[0](x)
282 282 if b'x' in fallback_value:
283 283 return b'x'
284 284 return b''
285 285
286 286 return get_flags
287 287
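For illustration, how the returned callable might be used (the names here are assumptions):

    get_flags = ds.flagfunc(buildfallback)
    flags = get_flags(b'some/file')
    # b'l' for a symlink, b'x' for an executable file,
    # b'' otherwise (or if the path is missing on disk)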
288 288 @propertycache
289 289 def _cwd(self):
290 290 # internal config: ui.forcecwd
291 291 forcecwd = self._ui.config(b'ui', b'forcecwd')
292 292 if forcecwd:
293 293 return forcecwd
294 294 return encoding.getcwd()
295 295
296 296 def getcwd(self):
297 297 """Return the path from which a canonical path is calculated.
298 298
299 299 This path should be used to resolve file patterns or to convert
300 300 canonical paths back to file paths for display. It shouldn't be
301 301 used to get real file paths. Use vfs functions instead.
302 302 """
303 303 cwd = self._cwd
304 304 if cwd == self._root:
305 305 return b''
306 306 # self._root ends with a path separator if self._root is '/' or 'C:\'
307 307 rootsep = self._root
308 308 if not util.endswithsep(rootsep):
309 309 rootsep += pycompat.ossep
310 310 if cwd.startswith(rootsep):
311 311 return cwd[len(rootsep) :]
312 312 else:
313 313 # we're outside the repo. return an absolute path.
314 314 return cwd
315 315
316 316 def pathto(self, f, cwd=None):
317 317 if cwd is None:
318 318 cwd = self.getcwd()
319 319 path = util.pathto(self._root, cwd, f)
320 320 if self._slash:
321 321 return util.pconvert(path)
322 322 return path
323 323
324 324 def get_entry(self, path):
325 325 """return a DirstateItem for the associated path"""
326 326 entry = self._map.get(path)
327 327 if entry is None:
328 328 return DirstateItem()
329 329 return entry
330 330
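A hypothetical lookup (the path is made up); note that a missing path yields a blank DirstateItem rather than None:

    entry = ds.get_entry(b'some/file')
    if entry.tracked:
        pass  # the file is tracked in the dirstate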
331 331 def __contains__(self, key):
332 332 return key in self._map
333 333
334 334 def __iter__(self):
335 335 return iter(sorted(self._map))
336 336
337 337 def items(self):
338 338 return pycompat.iteritems(self._map)
339 339
340 340 iteritems = items
341 341
342 342 def parents(self):
343 343 return [self._validate(p) for p in self._pl]
344 344
345 345 def p1(self):
346 346 return self._validate(self._pl[0])
347 347
348 348 def p2(self):
349 349 return self._validate(self._pl[1])
350 350
351 351 @property
352 352 def in_merge(self):
353 353 """True if a merge is in progress"""
354 354 return self._pl[1] != self._nodeconstants.nullid
355 355
356 356 def branch(self):
357 357 return encoding.tolocal(self._branch)
358 358
359 359 def setparents(self, p1, p2=None):
360 360 """Set dirstate parents to p1 and p2.
361 361
362 362 When moving from two parents to one, "merged" entries are
363 363 adjusted to normal, and previous copy records are discarded and
364 364 returned by the call.
365 365
366 366 See localrepo.setparents()
367 367 """
368 368 if p2 is None:
369 369 p2 = self._nodeconstants.nullid
370 370 if self._parentwriters == 0:
371 371 raise ValueError(
372 372 b"cannot set dirstate parent outside of "
373 373 b"dirstate.parentchange context manager"
374 374 )
375 375
376 376 self._dirty = True
377 377 oldp2 = self._pl[1]
378 378 if self._origpl is None:
379 379 self._origpl = self._pl
380 380 nullid = self._nodeconstants.nullid
381 381 # True if we need to fold p2 related state back to a linear case
382 382 fold_p2 = oldp2 != nullid and p2 == nullid
383 383 return self._map.setparents(p1, p2, fold_p2=fold_p2)
384 384
385 385 def setbranch(self, branch):
386 386 self.__class__._branch.set(self, encoding.fromlocal(branch))
387 387 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
388 388 try:
389 389 f.write(self._branch + b'\n')
390 390 f.close()
391 391
392 392 # make sure filecache has the correct stat info for _branch after
393 393 # replacing the underlying file
394 394 ce = self._filecache[b'_branch']
395 395 if ce:
396 396 ce.refresh()
397 397 except: # re-raises
398 398 f.discard()
399 399 raise
400 400
401 401 def invalidate(self):
402 402 """Causes the next access to reread the dirstate.
403 403
404 404 This is different from localrepo.invalidatedirstate() because it always
405 405 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
406 406 check whether the dirstate has changed before rereading it."""
407 407
408 408 for a in ("_map", "_branch", "_ignore"):
409 409 if a in self.__dict__:
410 410 delattr(self, a)
411 411 self._dirty = False
412 412 self._parentwriters = 0
413 413 self._origpl = None
414 414
415 415 def copy(self, source, dest):
416 416 """Mark dest as a copy of source. Unmark dest if source is None."""
417 417 if source == dest:
418 418 return
419 419 self._dirty = True
420 420 if source is not None:
421 421 self._map.copymap[dest] = source
422 422 else:
423 423 self._map.copymap.pop(dest, None)
424 424
425 425 def copied(self, file):
426 426 return self._map.copymap.get(file, None)
427 427
428 428 def copies(self):
429 429 return self._map.copymap
430 430
431 431 @requires_no_parents_change
432 432 def set_tracked(self, filename, reset_copy=False):
433 433 """a "public" method for generic code to mark a file as tracked
434 434
435 435 This function is to be called outside of "update/merge" case. For
436 436 example by a command like `hg add X`.
437 437
438 438 if reset_copy is set, any existing copy information will be dropped.
439 439
440 440 return True if the file was previously untracked, False otherwise.
441 441 """
442 442 self._dirty = True
443 443 entry = self._map.get(filename)
444 444 if entry is None or not entry.tracked:
445 445 self._check_new_tracked_filename(filename)
446 446 pre_tracked = self._map.set_tracked(filename)
447 447 if reset_copy:
448 448 self._map.copymap.pop(filename, None)
449 449 return pre_tracked
450 450
451 451 @requires_no_parents_change
452 452 def set_untracked(self, filename):
453 453 """a "public" method for generic code to mark a file as untracked
454 454
455 455 This function is to be called outside of "update/merge" case. For
456 456 example by a command like `hg remove X`.
457 457
458 458 return True if the file was previously tracked, False otherwise.
459 459 """
460 460 ret = self._map.set_untracked(filename)
461 461 if ret:
462 462 self._dirty = True
463 463 return ret
464 464
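These two methods back commands like `hg add` and `hg remove`; a minimal sketch, called outside any parentchange context:

    was_untracked = ds.set_tracked(b'newfile')   # e.g. hg add newfile
    was_tracked = ds.set_untracked(b'oldfile')   # e.g. hg remove oldfile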
465 465 @requires_no_parents_change
466 466 def set_clean(self, filename, parentfiledata):
467 467 """record that the current state of the file on disk is known to be clean"""
468 468 self._dirty = True
469 469 if not self._map[filename].tracked:
470 470 self._check_new_tracked_filename(filename)
471 471 (mode, size, mtime) = parentfiledata
472 472 self._map.set_clean(filename, mode, size, mtime)
473 473
474 474 @requires_no_parents_change
475 475 def set_possibly_dirty(self, filename):
476 476 """record that the current state of the file on disk is unknown"""
477 477 self._dirty = True
478 478 self._map.set_possibly_dirty(filename)
479 479
480 480 @requires_parents_change
481 481 def update_file_p1(
482 482 self,
483 483 filename,
484 484 p1_tracked,
485 485 ):
486 486 """Set a file as tracked in the parent (or not)
487 487
488 488 This is to be called when adjusting the dirstate to a new parent after a history
489 489 rewriting operation.
490 490
491 491 It should not be called during a merge (p2 != nullid) and only within
492 492 a `with dirstate.parentchange():` context.
493 493 """
494 494 if self.in_merge:
495 495 msg = b'update_file_reference should not be called when merging'
496 496 raise error.ProgrammingError(msg)
497 497 entry = self._map.get(filename)
498 498 if entry is None:
499 499 wc_tracked = False
500 500 else:
501 501 wc_tracked = entry.tracked
502 502 if not (p1_tracked or wc_tracked):
503 503 # the file is no longer relevant to anyone
504 504 if self._map.get(filename) is not None:
505 505 self._map.reset_state(filename)
506 506 self._dirty = True
507 507 elif (not p1_tracked) and wc_tracked:
508 508 if entry is not None and entry.added:
509 509 return # avoid dropping copy information (maybe?)
510 510
511 511 self._map.reset_state(
512 512 filename,
513 513 wc_tracked,
514 514 p1_tracked,
515 515 # the underlying reference might have changed, we will have to
516 516 # check it.
517 517 has_meaningful_mtime=False,
518 518 )
519 519
520 520 @requires_parents_change
521 521 def update_file(
522 522 self,
523 523 filename,
524 524 wc_tracked,
525 525 p1_tracked,
526 526 p2_info=False,
527 527 possibly_dirty=False,
528 528 parentfiledata=None,
529 529 ):
530 530 """update the information about a file in the dirstate
531 531
532 532 This is to be called when the dirstate's parent changes, to keep track
533 533 of the file's situation with regard to the working copy and its parent.
534 534
535 535 This function must be called within a `dirstate.parentchange` context.
536 536
537 537 note: the API is at an early stage and we might need to adjust it
538 538 depending on what information ends up being relevant and useful to
539 539 other processing.
540 540 """
541 541
542 542 # note: I do not think we need to double check name clash here since we
543 543 # are in an update/merge case that should already have taken care of
544 544 # this. The tests agree.
545 545
546 546 self._dirty = True
547 547
548 548 self._map.reset_state(
549 549 filename,
550 550 wc_tracked,
551 551 p1_tracked,
552 552 p2_info=p2_info,
553 553 has_meaningful_mtime=not possibly_dirty,
554 554 parentfiledata=parentfiledata,
555 555 )
556 556
557 557 def _check_new_tracked_filename(self, filename):
558 558 scmutil.checkfilename(filename)
559 559 if self._map.hastrackeddir(filename):
560 560 msg = _(b'directory %r already in dirstate')
561 561 msg %= pycompat.bytestr(filename)
562 562 raise error.Abort(msg)
563 563 # shadows
564 564 for d in pathutil.finddirs(filename):
565 565 if self._map.hastrackeddir(d):
566 566 break
567 567 entry = self._map.get(d)
568 568 if entry is not None and not entry.removed:
569 569 msg = _(b'file %r in dirstate clashes with %r')
570 570 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
571 571 raise error.Abort(msg)
572 572
573 573 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
574 574 if exists is None:
575 575 exists = os.path.lexists(os.path.join(self._root, path))
576 576 if not exists:
577 577 # Maybe a path component exists
578 578 if not ignoremissing and b'/' in path:
579 579 d, f = path.rsplit(b'/', 1)
580 580 d = self._normalize(d, False, ignoremissing, None)
581 581 folded = d + b"/" + f
582 582 else:
583 583 # No path components, preserve original case
584 584 folded = path
585 585 else:
586 586 # recursively normalize leading directory components
587 587 # against dirstate
588 588 if b'/' in normed:
589 589 d, f = normed.rsplit(b'/', 1)
590 590 d = self._normalize(d, False, ignoremissing, True)
591 591 r = self._root + b"/" + d
592 592 folded = d + b"/" + util.fspath(f, r)
593 593 else:
594 594 folded = util.fspath(normed, self._root)
595 595 storemap[normed] = folded
596 596
597 597 return folded
598 598
599 599 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
600 600 normed = util.normcase(path)
601 601 folded = self._map.filefoldmap.get(normed, None)
602 602 if folded is None:
603 603 if isknown:
604 604 folded = path
605 605 else:
606 606 folded = self._discoverpath(
607 607 path, normed, ignoremissing, exists, self._map.filefoldmap
608 608 )
609 609 return folded
610 610
611 611 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
612 612 normed = util.normcase(path)
613 613 folded = self._map.filefoldmap.get(normed, None)
614 614 if folded is None:
615 615 folded = self._map.dirfoldmap.get(normed, None)
616 616 if folded is None:
617 617 if isknown:
618 618 folded = path
619 619 else:
620 620 # store discovered result in dirfoldmap so that future
621 621 # normalizefile calls don't start matching directories
622 622 folded = self._discoverpath(
623 623 path, normed, ignoremissing, exists, self._map.dirfoldmap
624 624 )
625 625 return folded
626 626
627 627 def normalize(self, path, isknown=False, ignoremissing=False):
628 628 """
629 629 normalize the case of a pathname when on a casefolding filesystem
630 630
631 631 isknown specifies whether the filename came from walking the
632 632 disk, to avoid extra filesystem access.
633 633
634 634 If ignoremissing is True, missing paths are returned
635 635 unchanged. Otherwise, we try harder to normalize possibly
636 636 existing path components.
637 637
638 638 The normalized case is determined based on the following precedence:
639 639
640 640 - version of name already stored in the dirstate
641 641 - version of name stored on disk
642 642 - version provided via command arguments
643 643 """
644 644
645 645 if self._checkcase:
646 646 return self._normalize(path, isknown, ignoremissing)
647 647 return path
648 648
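A hypothetical example on a casefolding filesystem, assuming b'Foo.txt' is the stored case:

    ds.normalize(b'FOO.TXT')   # -> b'Foo.txt'
    # on a case-sensitive filesystem the path is returned unchanged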
649 649 def clear(self):
650 650 self._map.clear()
651 651 self._dirty = True
652 652
653 653 def rebuild(self, parent, allfiles, changedfiles=None):
654 654 if changedfiles is None:
655 655 # Rebuild entire dirstate
656 656 to_lookup = allfiles
657 657 to_drop = []
658 658 self.clear()
659 659 elif len(changedfiles) < 10:
660 660 # Avoid turning allfiles into a set, which can be expensive if it's
661 661 # large.
662 662 to_lookup = []
663 663 to_drop = []
664 664 for f in changedfiles:
665 665 if f in allfiles:
666 666 to_lookup.append(f)
667 667 else:
668 668 to_drop.append(f)
669 669 else:
670 670 changedfilesset = set(changedfiles)
671 671 to_lookup = changedfilesset & set(allfiles)
672 672 to_drop = changedfilesset - to_lookup
673 673
674 674 if self._origpl is None:
675 675 self._origpl = self._pl
676 676 self._map.setparents(parent, self._nodeconstants.nullid)
677 677
678 678 for f in to_lookup:
679 679
680 680 if self.in_merge:
681 681 self.set_tracked(f)
682 682 else:
683 683 self._map.reset_state(
684 684 f,
685 685 wc_tracked=True,
686 686 p1_tracked=True,
687 687 )
688 688 for f in to_drop:
689 689 self._map.reset_state(f)
690 690
691 691 self._dirty = True
692 692
693 693 def identity(self):
694 694 """Return identity of dirstate itself to detect changing in storage
695 695
696 696 If identity of previous dirstate is equal to this, writing
697 697 changes based on the former dirstate out can keep consistency.
698 698 """
699 699 return self._map.identity
700 700
701 701 def write(self, tr):
702 702 if not self._dirty:
703 703 return
704 704
705 705 filename = self._filename
706 706 if tr:
707 707 # delay writing in-memory changes out
708 708 tr.addfilegenerator(
709 b'dirstate',
709 b'dirstate-1-main',
710 710 (self._filename,),
711 711 lambda f: self._writedirstate(tr, f),
712 712 location=b'plain',
713 713 )
714 714 return
715 715
716 716 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
717 717 with file(self._filename) as f:
718 718 self._writedirstate(tr, f)
719 719
720 720 def addparentchangecallback(self, category, callback):
721 721 """add a callback to be called when the wd parents are changed
722 722
723 723 Callback will be called with the following arguments:
724 724 dirstate, (oldp1, oldp2), (newp1, newp2)
725 725
726 726 Category is a unique identifier to allow overwriting an old callback
727 727 with a newer callback.
728 728 """
729 729 self._plchangecallbacks[category] = callback
730 730
731 731 def _writedirstate(self, tr, st):
732 732 # notify callbacks about parents change
733 733 if self._origpl is not None and self._origpl != self._pl:
734 734 for c, callback in sorted(
735 735 pycompat.iteritems(self._plchangecallbacks)
736 736 ):
737 737 callback(self, self._origpl, self._pl)
738 738 self._origpl = None
739 739
740 740 self._map.write(tr, st)
741 741 self._dirty = False
742 742
743 743 def _dirignore(self, f):
744 744 if self._ignore(f):
745 745 return True
746 746 for p in pathutil.finddirs(f):
747 747 if self._ignore(p):
748 748 return True
749 749 return False
750 750
751 751 def _ignorefiles(self):
752 752 files = []
753 753 if os.path.exists(self._join(b'.hgignore')):
754 754 files.append(self._join(b'.hgignore'))
755 755 for name, path in self._ui.configitems(b"ui"):
756 756 if name == b'ignore' or name.startswith(b'ignore.'):
757 757 # we need to use os.path.join here rather than self._join
758 758 # because path is arbitrary and user-specified
759 759 files.append(os.path.join(self._rootdir, util.expandpath(path)))
760 760 return files
761 761
762 762 def _ignorefileandline(self, f):
763 763 files = collections.deque(self._ignorefiles())
764 764 visited = set()
765 765 while files:
766 766 i = files.popleft()
767 767 patterns = matchmod.readpatternfile(
768 768 i, self._ui.warn, sourceinfo=True
769 769 )
770 770 for pattern, lineno, line in patterns:
771 771 kind, p = matchmod._patsplit(pattern, b'glob')
772 772 if kind == b"subinclude":
773 773 if p not in visited:
774 774 files.append(p)
775 775 continue
776 776 m = matchmod.match(
777 777 self._root, b'', [], [pattern], warn=self._ui.warn
778 778 )
779 779 if m(f):
780 780 return (i, lineno, line)
781 781 visited.add(i)
782 782 return (None, -1, b"")
783 783
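A sketch of a lookup, assuming a repository whose .hgignore contains a `*.pyc` pattern on its first line:

    ignorefile, lineno, line = ds._ignorefileandline(b'build/x.pyc')
    # -> (path of the matching ignore file, 1, b'*.pyc') when ignored,
    #    (None, -1, b'') otherwise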
784 784 def _walkexplicit(self, match, subrepos):
785 785 """Get stat data about the files explicitly specified by match.
786 786
787 787 Return a triple (results, dirsfound, dirsnotfound).
788 788 - results is a mapping from filename to stat result. It also contains
789 789 listings mapping subrepos and .hg to None.
790 790 - dirsfound is a list of files found to be directories.
791 791 - dirsnotfound is a list of files that the dirstate thinks are
792 792 directories and that were not found."""
793 793
794 794 def badtype(mode):
795 795 kind = _(b'unknown')
796 796 if stat.S_ISCHR(mode):
797 797 kind = _(b'character device')
798 798 elif stat.S_ISBLK(mode):
799 799 kind = _(b'block device')
800 800 elif stat.S_ISFIFO(mode):
801 801 kind = _(b'fifo')
802 802 elif stat.S_ISSOCK(mode):
803 803 kind = _(b'socket')
804 804 elif stat.S_ISDIR(mode):
805 805 kind = _(b'directory')
806 806 return _(b'unsupported file type (type is %s)') % kind
807 807
808 808 badfn = match.bad
809 809 dmap = self._map
810 810 lstat = os.lstat
811 811 getkind = stat.S_IFMT
812 812 dirkind = stat.S_IFDIR
813 813 regkind = stat.S_IFREG
814 814 lnkkind = stat.S_IFLNK
815 815 join = self._join
816 816 dirsfound = []
817 817 foundadd = dirsfound.append
818 818 dirsnotfound = []
819 819 notfoundadd = dirsnotfound.append
820 820
821 821 if not match.isexact() and self._checkcase:
822 822 normalize = self._normalize
823 823 else:
824 824 normalize = None
825 825
826 826 files = sorted(match.files())
827 827 subrepos.sort()
828 828 i, j = 0, 0
829 829 while i < len(files) and j < len(subrepos):
830 830 subpath = subrepos[j] + b"/"
831 831 if files[i] < subpath:
832 832 i += 1
833 833 continue
834 834 while i < len(files) and files[i].startswith(subpath):
835 835 del files[i]
836 836 j += 1
837 837
838 838 if not files or b'' in files:
839 839 files = [b'']
840 840 # constructing the foldmap is expensive, so don't do it for the
841 841 # common case where files is ['']
842 842 normalize = None
843 843 results = dict.fromkeys(subrepos)
844 844 results[b'.hg'] = None
845 845
846 846 for ff in files:
847 847 if normalize:
848 848 nf = normalize(ff, False, True)
849 849 else:
850 850 nf = ff
851 851 if nf in results:
852 852 continue
853 853
854 854 try:
855 855 st = lstat(join(nf))
856 856 kind = getkind(st.st_mode)
857 857 if kind == dirkind:
858 858 if nf in dmap:
859 859 # file replaced by dir on disk but still in dirstate
860 860 results[nf] = None
861 861 foundadd((nf, ff))
862 862 elif kind == regkind or kind == lnkkind:
863 863 results[nf] = st
864 864 else:
865 865 badfn(ff, badtype(kind))
866 866 if nf in dmap:
867 867 results[nf] = None
868 868 except OSError as inst: # nf not found on disk - it is dirstate only
869 869 if nf in dmap: # does it exactly match a missing file?
870 870 results[nf] = None
871 871 else: # does it match a missing directory?
872 872 if self._map.hasdir(nf):
873 873 notfoundadd(nf)
874 874 else:
875 875 badfn(ff, encoding.strtolocal(inst.strerror))
876 876
877 877 # match.files() may contain explicitly-specified paths that shouldn't
878 878 # be taken; drop them from the list of files found. dirsfound/notfound
879 879 # aren't filtered here because they will be tested later.
880 880 if match.anypats():
881 881 for f in list(results):
882 882 if f == b'.hg' or f in subrepos:
883 883 # keep sentinel to disable further out-of-repo walks
884 884 continue
885 885 if not match(f):
886 886 del results[f]
887 887
888 888 # Case insensitive filesystems cannot rely on lstat() failing to detect
889 889 # a case-only rename. Prune the stat object for any file that does not
890 890 # match the case in the filesystem, if there are multiple files that
891 891 # normalize to the same path.
892 892 if match.isexact() and self._checkcase:
893 893 normed = {}
894 894
895 895 for f, st in pycompat.iteritems(results):
896 896 if st is None:
897 897 continue
898 898
899 899 nc = util.normcase(f)
900 900 paths = normed.get(nc)
901 901
902 902 if paths is None:
903 903 paths = set()
904 904 normed[nc] = paths
905 905
906 906 paths.add(f)
907 907
908 908 for norm, paths in pycompat.iteritems(normed):
909 909 if len(paths) > 1:
910 910 for path in paths:
911 911 folded = self._discoverpath(
912 912 path, norm, True, None, self._map.dirfoldmap
913 913 )
914 914 if path != folded:
915 915 results[path] = None
916 916
917 917 return results, dirsfound, dirsnotfound
918 918
919 919 def walk(self, match, subrepos, unknown, ignored, full=True):
920 920 """
921 921 Walk recursively through the directory tree, finding all files
922 922 matched by match.
923 923
924 924 If full is False, maybe skip some known-clean files.
925 925
926 926 Return a dict mapping filename to stat-like object (either
927 927 mercurial.osutil.stat instance or return value of os.stat()).
928 928
929 929 """
930 930 # full is a flag that extensions that hook into walk can use -- this
931 931 # implementation doesn't use it at all. This satisfies the contract
932 932 # because we only guarantee a "maybe".
933 933
934 934 if ignored:
935 935 ignore = util.never
936 936 dirignore = util.never
937 937 elif unknown:
938 938 ignore = self._ignore
939 939 dirignore = self._dirignore
940 940 else:
941 941 # if not unknown and not ignored, drop dir recursion and step 2
942 942 ignore = util.always
943 943 dirignore = util.always
944 944
945 945 matchfn = match.matchfn
946 946 matchalways = match.always()
947 947 matchtdir = match.traversedir
948 948 dmap = self._map
949 949 listdir = util.listdir
950 950 lstat = os.lstat
951 951 dirkind = stat.S_IFDIR
952 952 regkind = stat.S_IFREG
953 953 lnkkind = stat.S_IFLNK
954 954 join = self._join
955 955
956 956 exact = skipstep3 = False
957 957 if match.isexact(): # match.exact
958 958 exact = True
959 959 dirignore = util.always # skip step 2
960 960 elif match.prefix(): # match.match, no patterns
961 961 skipstep3 = True
962 962
963 963 if not exact and self._checkcase:
964 964 normalize = self._normalize
965 965 normalizefile = self._normalizefile
966 966 skipstep3 = False
967 967 else:
968 968 normalize = self._normalize
969 969 normalizefile = None
970 970
971 971 # step 1: find all explicit files
972 972 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
973 973 if matchtdir:
974 974 for d in work:
975 975 matchtdir(d[0])
976 976 for d in dirsnotfound:
977 977 matchtdir(d)
978 978
979 979 skipstep3 = skipstep3 and not (work or dirsnotfound)
980 980 work = [d for d in work if not dirignore(d[0])]
981 981
982 982 # step 2: visit subdirectories
983 983 def traverse(work, alreadynormed):
984 984 wadd = work.append
985 985 while work:
986 986 tracing.counter('dirstate.walk work', len(work))
987 987 nd = work.pop()
988 988 visitentries = match.visitchildrenset(nd)
989 989 if not visitentries:
990 990 continue
991 991 if visitentries == b'this' or visitentries == b'all':
992 992 visitentries = None
993 993 skip = None
994 994 if nd != b'':
995 995 skip = b'.hg'
996 996 try:
997 997 with tracing.log('dirstate.walk.traverse listdir %s', nd):
998 998 entries = listdir(join(nd), stat=True, skip=skip)
999 999 except OSError as inst:
1000 1000 if inst.errno in (errno.EACCES, errno.ENOENT):
1001 1001 match.bad(
1002 1002 self.pathto(nd), encoding.strtolocal(inst.strerror)
1003 1003 )
1004 1004 continue
1005 1005 raise
1006 1006 for f, kind, st in entries:
1007 1007 # Some matchers may return files in the visitentries set,
1008 1008 # instead of 'this', if the matcher explicitly mentions them
1009 1009 # and is not an exactmatcher. This is acceptable; we do not
1010 1010 # make any hard assumptions about file-or-directory below
1011 1011 # based on the presence of `f` in visitentries. If
1012 1012 # visitchildrenset returned a set, we can always skip the
1013 1013 # entries *not* in the set it provided regardless of whether
1014 1014 # they're actually a file or a directory.
1015 1015 if visitentries and f not in visitentries:
1016 1016 continue
1017 1017 if normalizefile:
1018 1018 # even though f might be a directory, we're only
1019 1019 # interested in comparing it to files currently in the
1020 1020 # dmap -- therefore normalizefile is enough
1021 1021 nf = normalizefile(
1022 1022 nd and (nd + b"/" + f) or f, True, True
1023 1023 )
1024 1024 else:
1025 1025 nf = nd and (nd + b"/" + f) or f
1026 1026 if nf not in results:
1027 1027 if kind == dirkind:
1028 1028 if not ignore(nf):
1029 1029 if matchtdir:
1030 1030 matchtdir(nf)
1031 1031 wadd(nf)
1032 1032 if nf in dmap and (matchalways or matchfn(nf)):
1033 1033 results[nf] = None
1034 1034 elif kind == regkind or kind == lnkkind:
1035 1035 if nf in dmap:
1036 1036 if matchalways or matchfn(nf):
1037 1037 results[nf] = st
1038 1038 elif (matchalways or matchfn(nf)) and not ignore(
1039 1039 nf
1040 1040 ):
1041 1041 # unknown file -- normalize if necessary
1042 1042 if not alreadynormed:
1043 1043 nf = normalize(nf, False, True)
1044 1044 results[nf] = st
1045 1045 elif nf in dmap and (matchalways or matchfn(nf)):
1046 1046 results[nf] = None
1047 1047
1048 1048 for nd, d in work:
1049 1049 # alreadynormed means that processwork doesn't have to do any
1050 1050 # expensive directory normalization
1051 1051 alreadynormed = not normalize or nd == d
1052 1052 traverse([d], alreadynormed)
1053 1053
1054 1054 for s in subrepos:
1055 1055 del results[s]
1056 1056 del results[b'.hg']
1057 1057
1058 1058 # step 3: visit remaining files from dmap
1059 1059 if not skipstep3 and not exact:
1060 1060 # If a dmap file is not in results yet, it was either
1061 1061 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1062 1062 # symlink directory.
1063 1063 if not results and matchalways:
1064 1064 visit = [f for f in dmap]
1065 1065 else:
1066 1066 visit = [f for f in dmap if f not in results and matchfn(f)]
1067 1067 visit.sort()
1068 1068
1069 1069 if unknown:
1070 1070 # unknown == True means we walked all dirs under the roots
1071 1071 # that weren't ignored, and everything that matched was stat'ed
1072 1072 # and is already in results.
1073 1073 # The rest must thus be ignored or under a symlink.
1074 1074 audit_path = pathutil.pathauditor(self._root, cached=True)
1075 1075
1076 1076 for nf in iter(visit):
1077 1077 # If a stat for the same file was already added with a
1078 1078 # different case, don't add one for this, since that would
1079 1079 # make it appear as if the file exists under both names
1080 1080 # on disk.
1081 1081 if (
1082 1082 normalizefile
1083 1083 and normalizefile(nf, True, True) in results
1084 1084 ):
1085 1085 results[nf] = None
1086 1086 # Report ignored items in the dmap as long as they are not
1087 1087 # under a symlink directory.
1088 1088 elif audit_path.check(nf):
1089 1089 try:
1090 1090 results[nf] = lstat(join(nf))
1091 1091 # file was just ignored, no links, and exists
1092 1092 except OSError:
1093 1093 # file doesn't exist
1094 1094 results[nf] = None
1095 1095 else:
1096 1096 # It's either missing or under a symlink directory
1097 1097 # which we in this case report as missing
1098 1098 results[nf] = None
1099 1099 else:
1100 1100 # We may not have walked the full directory tree above,
1101 1101 # so stat and check everything we missed.
1102 1102 iv = iter(visit)
1103 1103 for st in util.statfiles([join(i) for i in visit]):
1104 1104 results[next(iv)] = st
1105 1105 return results
1106 1106
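A minimal sketch of walking everything, assuming an always-matcher and no subrepos:

    m = matchmod.always()
    results = ds.walk(m, subrepos=[], unknown=True, ignored=False)
    for fn, st in sorted(results.items()):
        pass  # st is a stat-like object, or None for dirstate-only entries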
1107 1107 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1108 1108 # Force Rayon (Rust parallelism library) to respect the number of
1109 1109 # workers. This is a temporary workaround until Rust code knows
1110 1110 # how to read the config file.
1111 1111 numcpus = self._ui.configint(b"worker", b"numcpus")
1112 1112 if numcpus is not None:
1113 1113 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1114 1114
1115 1115 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1116 1116 if not workers_enabled:
1117 1117 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1118 1118
1119 1119 (
1120 1120 lookup,
1121 1121 modified,
1122 1122 added,
1123 1123 removed,
1124 1124 deleted,
1125 1125 clean,
1126 1126 ignored,
1127 1127 unknown,
1128 1128 warnings,
1129 1129 bad,
1130 1130 traversed,
1131 1131 dirty,
1132 1132 ) = rustmod.status(
1133 1133 self._map._map,
1134 1134 matcher,
1135 1135 self._rootdir,
1136 1136 self._ignorefiles(),
1137 1137 self._checkexec,
1138 1138 bool(list_clean),
1139 1139 bool(list_ignored),
1140 1140 bool(list_unknown),
1141 1141 bool(matcher.traversedir),
1142 1142 )
1143 1143
1144 1144 self._dirty |= dirty
1145 1145
1146 1146 if matcher.traversedir:
1147 1147 for dir in traversed:
1148 1148 matcher.traversedir(dir)
1149 1149
1150 1150 if self._ui.warn:
1151 1151 for item in warnings:
1152 1152 if isinstance(item, tuple):
1153 1153 file_path, syntax = item
1154 1154 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1155 1155 file_path,
1156 1156 syntax,
1157 1157 )
1158 1158 self._ui.warn(msg)
1159 1159 else:
1160 1160 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1161 1161 self._ui.warn(
1162 1162 msg
1163 1163 % (
1164 1164 pathutil.canonpath(
1165 1165 self._rootdir, self._rootdir, item
1166 1166 ),
1167 1167 b"No such file or directory",
1168 1168 )
1169 1169 )
1170 1170
1171 1171 for (fn, message) in bad:
1172 1172 matcher.bad(fn, encoding.strtolocal(message))
1173 1173
1174 1174 status = scmutil.status(
1175 1175 modified=modified,
1176 1176 added=added,
1177 1177 removed=removed,
1178 1178 deleted=deleted,
1179 1179 unknown=unknown,
1180 1180 ignored=ignored,
1181 1181 clean=clean,
1182 1182 )
1183 1183 return (lookup, status)
1184 1184
1185 1185 def status(self, match, subrepos, ignored, clean, unknown):
1186 1186 """Determine the status of the working copy relative to the
1187 1187 dirstate and return a pair of (unsure, status), where status is of type
1188 1188 scmutil.status and:
1189 1189
1190 1190 unsure:
1191 1191 files that might have been modified since the dirstate was
1192 1192 written, but need to be read to be sure (size is the same
1193 1193 but mtime differs)
1194 1194 status.modified:
1195 1195 files that have definitely been modified since the dirstate
1196 1196 was written (different size or mode)
1197 1197 status.clean:
1198 1198 files that have definitely not been modified since the
1199 1199 dirstate was written
1200 1200 """
1201 1201 listignored, listclean, listunknown = ignored, clean, unknown
1202 1202 lookup, modified, added, unknown, ignored = [], [], [], [], []
1203 1203 removed, deleted, clean = [], [], []
1204 1204
1205 1205 dmap = self._map
1206 1206 dmap.preload()
1207 1207
1208 1208 use_rust = True
1209 1209
1210 1210 allowed_matchers = (
1211 1211 matchmod.alwaysmatcher,
1212 1212 matchmod.exactmatcher,
1213 1213 matchmod.includematcher,
1214 1214 )
1215 1215
1216 1216 if rustmod is None:
1217 1217 use_rust = False
1218 1218 elif self._checkcase:
1219 1219 # Case-insensitive filesystems are not handled yet
1220 1220 use_rust = False
1221 1221 elif subrepos:
1222 1222 use_rust = False
1223 1223 elif sparse.enabled:
1224 1224 use_rust = False
1225 1225 elif not isinstance(match, allowed_matchers):
1226 1226 # Some matchers have yet to be implemented
1227 1227 use_rust = False
1228 1228
1229 1229 # Get the time from the filesystem so we can disambiguate files that
1230 1230 # appear modified in the present or future.
1231 1231 try:
1232 1232 mtime_boundary = timestamp.get_fs_now(self._opener)
1233 1233 except OSError:
1234 1234 # In largefiles or readonly context
1235 1235 mtime_boundary = None
1236 1236
1237 1237 if use_rust:
1238 1238 try:
1239 1239 res = self._rust_status(
1240 1240 match, listclean, listignored, listunknown
1241 1241 )
1242 1242 return res + (mtime_boundary,)
1243 1243 except rustmod.FallbackError:
1244 1244 pass
1245 1245
1246 1246 def noop(f):
1247 1247 pass
1248 1248
1249 1249 dcontains = dmap.__contains__
1250 1250 dget = dmap.__getitem__
1251 1251 ladd = lookup.append # aka "unsure"
1252 1252 madd = modified.append
1253 1253 aadd = added.append
1254 1254 uadd = unknown.append if listunknown else noop
1255 1255 iadd = ignored.append if listignored else noop
1256 1256 radd = removed.append
1257 1257 dadd = deleted.append
1258 1258 cadd = clean.append if listclean else noop
1259 1259 mexact = match.exact
1260 1260 dirignore = self._dirignore
1261 1261 checkexec = self._checkexec
1262 1262 checklink = self._checklink
1263 1263 copymap = self._map.copymap
1264 1264
1265 1265 # We need to do full walks when either
1266 1266 # - we're listing all clean files, or
1267 1267 # - match.traversedir does something, because match.traversedir should
1268 1268 # be called for every dir in the working dir
1269 1269 full = listclean or match.traversedir is not None
1270 1270 for fn, st in pycompat.iteritems(
1271 1271 self.walk(match, subrepos, listunknown, listignored, full=full)
1272 1272 ):
1273 1273 if not dcontains(fn):
1274 1274 if (listignored or mexact(fn)) and dirignore(fn):
1275 1275 if listignored:
1276 1276 iadd(fn)
1277 1277 else:
1278 1278 uadd(fn)
1279 1279 continue
1280 1280
1281 1281 t = dget(fn)
1282 1282 mode = t.mode
1283 1283 size = t.size
1284 1284
1285 1285 if not st and t.tracked:
1286 1286 dadd(fn)
1287 1287 elif t.p2_info:
1288 1288 madd(fn)
1289 1289 elif t.added:
1290 1290 aadd(fn)
1291 1291 elif t.removed:
1292 1292 radd(fn)
1293 1293 elif t.tracked:
1294 1294 if not checklink and t.has_fallback_symlink:
1295 1295 # If the file system does not support symlink, the mode
1296 1296 # might not be correctly stored in the dirstate, so do not
1297 1297 # trust it.
1298 1298 ladd(fn)
1299 1299 elif not checkexec and t.has_fallback_exec:
1300 1300 # If the file system does not support exec bits, the mode
1301 1301 # might not be correctly stored in the dirstate, so do not
1302 1302 # trust it.
1303 1303 ladd(fn)
1304 1304 elif (
1305 1305 size >= 0
1306 1306 and (
1307 1307 (size != st.st_size and size != st.st_size & _rangemask)
1308 1308 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1309 1309 )
1310 1310 or fn in copymap
1311 1311 ):
1312 1312 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1313 1313 # issue6456: Size returned may be longer due to
1314 1314 # encryption on EXT-4 fscrypt, undecided.
1315 1315 ladd(fn)
1316 1316 else:
1317 1317 madd(fn)
1318 1318 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1319 1319 # There might be a change in the future if for example the
1320 1320 # internal clock is off, but this is a case where the issues
1321 1321 # the user would face would be a lot worse and there is
1322 1322 # nothing we can really do.
1323 1323 ladd(fn)
1324 1324 elif listclean:
1325 1325 cadd(fn)
1326 1326 status = scmutil.status(
1327 1327 modified, added, removed, deleted, unknown, ignored, clean
1328 1328 )
1329 1329 return (lookup, status, mtime_boundary)
1330 1330
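A sketch of a status call mirroring `hg status`, assuming a matcher `m`:

    unsure, st, mtime_boundary = ds.status(
        m, subrepos=[], ignored=False, clean=False, unknown=True
    )
    # `unsure` files need a content read to classify; `st` is a
    # scmutil.status with .modified, .added, .removed, etc.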
1331 1331 def matches(self, match):
1332 1332 """
1333 1333 return files in the dirstate (in whatever state) filtered by match
1334 1334 """
1335 1335 dmap = self._map
1336 1336 if rustmod is not None:
1337 1337 dmap = self._map._map
1338 1338
1339 1339 if match.always():
1340 1340 return dmap.keys()
1341 1341 files = match.files()
1342 1342 if match.isexact():
1343 1343 # fast path -- filter the other way around, since typically files is
1344 1344 # much smaller than dmap
1345 1345 return [f for f in files if f in dmap]
1346 1346 if match.prefix() and all(fn in dmap for fn in files):
1347 1347 # fast path -- all the values are known to be files, so just return
1348 1348 # that
1349 1349 return list(files)
1350 1350 return [f for f in dmap if match(f)]
1351 1351
1352 1352 def _actualfilename(self, tr):
1353 1353 if tr:
1354 1354 return self._pendingfilename
1355 1355 else:
1356 1356 return self._filename
1357 1357
1358 1358 def savebackup(self, tr, backupname):
1359 1359 '''Save current dirstate into backup file'''
1360 1360 filename = self._actualfilename(tr)
1361 1361 assert backupname != filename
1362 1362
1363 1363 # use '_writedirstate' instead of 'write' to make sure changes are written
1364 1364 # out, because the latter skips writing while a transaction is running.
1365 1365 # The output file will be used to create a backup of the dirstate at this point.
1366 1366 if self._dirty or not self._opener.exists(filename):
1367 1367 self._writedirstate(
1368 1368 tr,
1369 1369 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1370 1370 )
1371 1371
1372 1372 if tr:
1373 1373 # ensure that subsequent tr.writepending returns True for
1374 1374 # changes written out above, even if dirstate is never
1375 1375 # changed after this
1376 1376 tr.addfilegenerator(
1377 b'dirstate',
1377 b'dirstate-1-main',
1378 1378 (self._filename,),
1379 1379 lambda f: self._writedirstate(tr, f),
1380 1380 location=b'plain',
1381 1381 )
1382 1382
1383 1383 # ensure that pending file written above is unlinked at
1384 1384 # failure, even if tr.writepending isn't invoked until the
1385 1385 # end of this transaction
1386 1386 tr.registertmp(filename, location=b'plain')
1387 1387
1388 1388 self._opener.tryunlink(backupname)
1389 1389 # hardlink backup is okay because _writedirstate is always called
1390 1390 # with an "atomictemp=True" file.
1391 1391 util.copyfile(
1392 1392 self._opener.join(filename),
1393 1393 self._opener.join(backupname),
1394 1394 hardlink=True,
1395 1395 )
1396 1396
1397 1397 def restorebackup(self, tr, backupname):
1398 1398 '''Restore the dirstate from a backup file'''
1399 1399 # this "invalidate()" prevents "wlock.release()" from writing
1400 1400 # changes of dirstate out after restoring from backup file
1401 1401 self.invalidate()
1402 1402 filename = self._actualfilename(tr)
1403 1403 o = self._opener
1404 1404 if util.samefile(o.join(backupname), o.join(filename)):
1405 1405 o.unlink(backupname)
1406 1406 else:
1407 1407 o.rename(backupname, filename, checkambig=True)
1408 1408
1409 1409 def clearbackup(self, tr, backupname):
1410 1410 '''Clear backup file'''
1411 1411 self._opener.unlink(backupname)
1412 1412
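Together, the three methods above form a save/restore/clear cycle around risky operations; a minimal sketch with a hypothetical backup name:

    ds.savebackup(tr, b'dirstate.backup')
    try:
        risky_operation()   # placeholder
    except Exception:
        ds.restorebackup(tr, b'dirstate.backup')
        raise
    else:
        ds.clearbackup(tr, b'dirstate.backup')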
1413 1413 def verify(self, m1, m2):
1414 1414 """check the dirstate content again the parent manifest and yield errors"""
1415 1415 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1416 1416 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1417 1417 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1418 1418 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1419 1419 for f, entry in self.items():
1420 1420 state = entry.state
1421 1421 if state in b"nr" and f not in m1:
1422 1422 yield (missing_from_p1, f, state)
1423 1423 if state in b"a" and f in m1:
1424 1424 yield (unexpected_in_p1, f, state)
1425 1425 if state in b"m" and f not in m1 and f not in m2:
1426 1426 yield (missing_from_ps, f, state)
1427 1427 for f in m1:
1428 1428 state = self.get_entry(f).state
1429 1429 if state not in b"nrm":
1430 1430 yield (missing_from_ds, f, state)
@@ -1,768 +1,771 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import errno
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 error,
21 21 pycompat,
22 22 util,
23 23 )
24 24 from .utils import stringutil
25 25
26 26 version = 2
27 27
28 28 # These are the file generators that should only be executed after the
29 29 # finalizers are done, since they rely on the output of the finalizers (like
30 30 # the changelog having been written).
31 postfinalizegenerators = {b'bookmarks', b'dirstate'}
31 postfinalizegenerators = {
32 b'bookmarks',
33 b'dirstate-1-main',
34 }
32 35
33 36 GEN_GROUP_ALL = b'all'
34 37 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
35 38 GEN_GROUP_POST_FINALIZE = b'postfinalize'
36 39
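Because the renamed id b'dirstate-1-main' is a member of postfinalizegenerators, the dirstate generator keeps running in the post-finalize group; conceptually (a hypothetical helper, not part of this file):

    def gen_group(genid):
        if genid in postfinalizegenerators:
            return GEN_GROUP_POST_FINALIZE   # runs after the finalizers
        return GEN_GROUP_PRE_FINALIZE        # runs before the finalizers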
37 40
38 41 def active(func):
39 42 def _active(self, *args, **kwds):
40 43 if self._count == 0:
41 44 raise error.ProgrammingError(
42 45 b'cannot use transaction when it is already committed/aborted'
43 46 )
44 47 return func(self, *args, **kwds)
45 48
46 49 return _active
47 50
48 51
49 52 def _playback(
50 53 journal,
51 54 report,
52 55 opener,
53 56 vfsmap,
54 57 entries,
55 58 backupentries,
56 59 unlink=True,
57 60 checkambigfiles=None,
58 61 ):
59 62 for f, o in sorted(dict(entries).items()):
60 63 if o or not unlink:
61 64 checkambig = checkambigfiles and (f, b'') in checkambigfiles
62 65 try:
63 66 fp = opener(f, b'a', checkambig=checkambig)
64 67 if fp.tell() < o:
65 68 raise error.Abort(
66 69 _(
67 70 b"attempted to truncate %s to %d bytes, but it was "
68 71 b"already %d bytes\n"
69 72 )
70 73 % (f, o, fp.tell())
71 74 )
72 75 fp.truncate(o)
73 76 fp.close()
74 77 except IOError:
75 78 report(_(b"failed to truncate %s\n") % f)
76 79 raise
77 80 else:
78 81 try:
79 82 opener.unlink(f)
80 83 except (IOError, OSError) as inst:
81 84 if inst.errno != errno.ENOENT:
82 85 raise
83 86
84 87 backupfiles = []
85 88 for l, f, b, c in backupentries:
86 89 if l not in vfsmap and c:
87 90 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
88 91 vfs = vfsmap[l]
89 92 try:
90 93 if f and b:
91 94 filepath = vfs.join(f)
92 95 backuppath = vfs.join(b)
93 96 checkambig = checkambigfiles and (f, l) in checkambigfiles
94 97 try:
95 98 util.copyfile(backuppath, filepath, checkambig=checkambig)
96 99 backupfiles.append(b)
97 100 except IOError as exc:
98 101 e_msg = stringutil.forcebytestr(exc)
99 102 report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
100 103 else:
101 104 target = f or b
102 105 try:
103 106 vfs.unlink(target)
104 107 except (IOError, OSError) as inst:
105 108 if inst.errno != errno.ENOENT:
106 109 raise
107 110 except (IOError, OSError, error.Abort):
108 111 if not c:
109 112 raise
110 113
111 114 backuppath = b"%s.backupfiles" % journal
112 115 if opener.exists(backuppath):
113 116 opener.unlink(backuppath)
114 117 opener.unlink(journal)
115 118 try:
116 119 for f in backupfiles:
117 120 if opener.exists(f):
118 121 opener.unlink(f)
119 122 except (IOError, OSError, error.Abort):
120 123 # only the pure backup file remains; it is safe to ignore any error
121 124 pass
122 125
123 126
124 127 class transaction(util.transactional):
125 128 def __init__(
126 129 self,
127 130 report,
128 131 opener,
129 132 vfsmap,
130 133 journalname,
131 134 undoname=None,
132 135 after=None,
133 136 createmode=None,
134 137 validator=None,
135 138 releasefn=None,
136 139 checkambigfiles=None,
137 140 name='<unnamed>',
138 141 ):
139 142 """Begin a new transaction
140 143
141 144 Begins a new transaction that allows rolling back writes in the event of
142 145 an exception.
143 146
144 147 * `after`: called after the transaction has been committed
145 148 * `createmode`: the mode of the journal file that will be created
146 149 * `releasefn`: called after releasing (with transaction and result)
147 150
148 151 `checkambigfiles` is a set of (path, vfs-location) tuples,
149 152 which determine whether file stat ambiguity should be avoided
150 153 for the corresponding files.
151 154 """
152 155 self._count = 1
153 156 self._usages = 1
154 157 self._report = report
155 158 # a vfs to the store content
156 159 self._opener = opener
157 160 # a map to access file in various {location -> vfs}
158 161 vfsmap = vfsmap.copy()
159 162 vfsmap[b''] = opener # set default value
160 163 self._vfsmap = vfsmap
161 164 self._after = after
162 165 self._offsetmap = {}
163 166 self._newfiles = set()
164 167 self._journal = journalname
165 168 self._undoname = undoname
166 169 self._queue = []
167 170 # A callback to do something just after releasing transaction.
168 171 if releasefn is None:
169 172 releasefn = lambda tr, success: None
170 173 self._releasefn = releasefn
171 174
172 175 self._checkambigfiles = set()
173 176 if checkambigfiles:
174 177 self._checkambigfiles.update(checkambigfiles)
175 178
176 179 self._names = [name]
177 180
178 181 # A dict dedicated to precisely tracking the changes introduced in the
179 182 # transaction.
180 183 self.changes = {}
181 184
182 185 # a dict of arguments to be passed to hooks
183 186 self.hookargs = {}
184 187 self._file = opener.open(self._journal, b"w+")
185 188
186 189 # a list of ('location', 'path', 'backuppath', cache) entries.
187 190 # - if 'backuppath' is empty, no file existed at backup time
188 191 # - if 'path' is empty, this is a temporary transaction file
189 192 # - if 'location' is not empty, the path is outside main opener reach.
190 193 # use 'location' value as a key in a vfsmap to find the right 'vfs'
191 194 # (cache is currently unused)
192 195 self._backupentries = []
193 196 self._backupmap = {}
194 197 self._backupjournal = b"%s.backupfiles" % self._journal
195 198 self._backupsfile = opener.open(self._backupjournal, b'w')
196 199 self._backupsfile.write(b'%d\n' % version)
197 200
198 201 if createmode is not None:
199 202 opener.chmod(self._journal, createmode & 0o666)
200 203 opener.chmod(self._backupjournal, createmode & 0o666)
201 204
202 205 # hold file generations to be performed on commit
203 206 self._filegenerators = {}
204 207 # hold callback to write pending data for hooks
205 208 self._pendingcallback = {}
206 209 # True if any pending data has ever been written
207 210 self._anypending = False
208 211 # holds callback to call when writing the transaction
209 212 self._finalizecallback = {}
210 213 # holds callback to call when validating the transaction
211 214 # should raise exception if anything is wrong
212 215 self._validatecallback = {}
213 216 if validator is not None:
214 217 self._validatecallback[b'001-userhooks'] = validator
215 218 # hold callback for post transaction close
216 219 self._postclosecallback = {}
217 220 # holds callbacks to call during abort
218 221 self._abortcallback = {}
219 222
220 223 def __repr__(self):
221 224 name = '/'.join(self._names)
222 225 return '<transaction name=%s, count=%d, usages=%d>' % (
223 226 name,
224 227 self._count,
225 228 self._usages,
226 229 )
227 230
228 231 def __del__(self):
229 232 if self._journal:
230 233 self._abort()
231 234
232 235 @property
233 236 def finalized(self):
234 237 return self._finalizecallback is None
235 238
236 239 @active
237 240 def startgroup(self):
238 241 """delay registration of file entry
239 242
240 243 This is used by strip to delay visibility of the strip offset. The transaction
241 244 sees either none or all of the strip actions to be done."""
242 245 self._queue.append([])
243 246
244 247 @active
245 248 def endgroup(self):
246 249 """apply delayed registration of file entry.
247 250
248 251 This is used by strip to delay visibility of the strip offset. The transaction
249 252 sees either none or all of the strip actions to be done."""
250 253 q = self._queue.pop()
251 254 for f, o in q:
252 255 self._addentry(f, o)
253 256
    @active
    def add(self, file, offset):
        """record the state of an append-only file before update"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
        ):
            return
        if self._queue:
            self._queue[-1].append((file, offset))
            return

        self._addentry(file, offset)

    def _addentry(self, file, offset):
        """add an append-only entry to memory and on-disk state"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
        ):
            return
        if offset:
            self._offsetmap[file] = offset
        else:
            self._newfiles.add(file)
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

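    # Journal format sketch: the journal is a plain file of NUL-separated
    # (path, offset) pairs, one per line, e.g. (illustrative content):
    #
    #   00changelog.i\x001024\n
    #   00manifest.i\x00512\n
    #
    # On abort, each listed file is truncated back to the recorded offset
    # (offset 0 marks a new file, which is removed entirely).
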
    @active
    def addbackup(self, file, hardlink=True, location=b''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        * `location`: the key in `transaction.vfsmap` for the vfs holding
          the file (empty for the default store vfs)
        """
        if self._queue:
            msg = b'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
        ):
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = b"%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            backupfile = b''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

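    # Backup journal format sketch: '<journal>.backupfiles' starts with a
    # version line followed by one NUL-separated (location, path, backuppath,
    # cache) record per line, e.g. (illustrative content, default location):
    #
    #   <version>\n
    #   \x00data/foo.i\x00data/journal.backup.foo.i\x000\n
    #
    # An empty backuppath means the file did not exist when the backup was
    # taken; an empty path marks a temporary transaction file.
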
    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, b'', tmpfile, False))

    @active
    def addfilegenerator(
        self, genid, filenames, genfunc, order=0, location=b''
    ):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content for each entry in the `filenames` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such files.

        The `genid` argument is used to ensure the same set of files is only
        generated once. A call to `addfilegenerator` for a `genid` that is
        already present will overwrite the old entry.

        The `order` argument may be used to control the order in which
        multiple generators will be executed.

        The `location` argument may be used to indicate that the files are
        located outside of the standard directory for transactions. It should
        match one of the keys of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom
        # vfs, except for bookmarks, which are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

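    # A minimal usage sketch (illustrative; `tr` is assumed to be an active
    # transaction, and the genid, content and b'plain' location are made up
    # for the example):
    #
    #   def writebookmarks(fp):
    #       fp.write(b'deadbeef mybookmark\n')
    #
    #   tr.addfilegenerator(
    #       b'bookmarks', (b'bookmarks',), writebookmarks, location=b'plain'
    #   )
    #
    # At close time the transaction backs up any existing 'bookmarks' file,
    # opens a fresh file object and passes it to `writebookmarks`.
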
    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
        # write files registered for generation
        any = False

        if group == GEN_GROUP_ALL:
            skip_post = skip_pre = False
        else:
            skip_pre = group == GEN_GROUP_POST_FINALIZE
            skip_post = group == GEN_GROUP_PRE_FINALIZE

        for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after finalize
            is_post = id in postfinalizegenerators
            if skip_post and is_post:
                continue
            elif skip_pre and not is_post:
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(
                        vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                    )
                genfunc(*files)
                for f in files:
                    f.close()
                # skip discard() loop since we're sure no open file remains
                del files[:]
            finally:
                for f in files:
                    f.discard()
        return any

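    # Ordering sketch (illustrative): with GEN_GROUP_ALL every generator
    # runs in one pass; close() below instead makes two passes around the
    # finalize callbacks:
    #
    #   tr._generatefiles(group=GEN_GROUP_PRE_FINALIZE)   # most generators
    #   ... finalize callbacks run here ...
    #   tr._generatefiles(group=GEN_GROUP_POST_FINALIZE)  # only ids listed in
    #                                                     # postfinalizegenerators
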
    @active
    def findoffset(self, file):
        if file in self._newfiles:
            return 0
        return self._offsetmap.get(file)

    @active
    def readjournal(self):
        self._file.seek(0)
        entries = []
        for l in self._file.readlines():
            file, troffset = l.split(b'\0')
            entries.append((file, int(troffset)))
        return entries

    @active
    def replace(self, file, offset):
        """
        replace can only replace already committed entries
        that are not pending in the queue
        """
        if file in self._newfiles:
            if not offset:
                return
            self._newfiles.remove(file)
            self._offsetmap[file] = offset
        elif file in self._offsetmap:
            if not offset:
                del self._offsetmap[file]
                self._newfiles.add(file)
            else:
                self._offsetmap[file] = offset
        else:
            raise KeyError(file)
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

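    # Replace semantics sketch (illustrative): a zero offset moves an entry
    # into the "new file" set and a non-zero offset moves it back, so the
    # journal always reflects the latest truncation target:
    #
    #   tr.add(b'data/foo.i', 0)       # recorded as a new file
    #   tr.replace(b'data/foo.i', 64)  # now truncated to 64 bytes on abort
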
    @active
    def nest(self, name='<unnamed>'):
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

    def running(self):
        return self._count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        """write pending file to temporary version

        This is used to allow hooks to view a transaction before commit"""
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix=b'.pending')
        return self._anypending

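    # Pending-data sketch (illustrative; `tr` is an active transaction and
    # `runhooks` is a hypothetical caller-side helper):
    #
    #   if tr.writepending():
    #       # generators were rerun with a '.pending' suffix, e.g. a
    #       # b'bookmarks' generator produced b'bookmarks.pending', which
    #       # hooks may inspect before the transaction actually commits
    #       runhooks(pending=True)
    #
    # The '.pending' files are registered via registertmp() and removed on
    # both success and failure.
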
    @active
    def hasfinalize(self, category):
        """check whether a callback already exists for a category"""
        return category in self._finalizecallback

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def addvalidator(self, category, callback):
        """adds a callback to be called when validating the transaction.

        The transaction will be given as the first argument to the callback.

        The callback should raise an exception to abort the transaction."""
        self._validatecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            for category in sorted(self._validatecallback):
                self._validatecallback[category](self)
            self._validatecallback = None  # Help prevent cycles.
            self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
            while self._finalizecallback:
                callbacks = self._finalizecallback
                self._finalizecallback = {}
                categories = sorted(callbacks)
                for cat in categories:
                    callbacks[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

        self._count -= 1
        if self._count != 0:
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._offsetmap = {}
        self._newfiles = set()
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location "
                    b"%s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        """abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)"""
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return

        undo_backup_path = b"%s.backupfiles" % self._undoname
        undobackupfile = self._opener.open(undo_backup_path, b'w')
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location "
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self._journal), name
                uname = name.replace(self._journal, self._undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()

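    # Undo naming sketch (illustrative): each backup file is copied under the
    # undo prefix so a later `hg rollback` can find it, e.g.
    #
    #   journal.backup.foo.i  ->  undo.backup.foo.i
    #
    # assuming self._journal == b'journal' and self._undoname == b'undo'.
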
    def _abort(self):
        entries = self.readjournal()
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            if not entries and not self._backupentries:
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_(b"transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(
                    self._journal,
                    self._report,
                    self._opener,
                    self._vfsmap,
                    entries,
                    self._backupentries,
                    False,
                    checkambigfiles=self._checkambigfiles,
                )
                self._report(_(b"rollback completed\n"))
            except BaseException as exc:
                self._report(_(b"rollback failed - please run hg recover\n"))
                self._report(
                    _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                )
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.


BAD_VERSION_MSG = _(
    b"journal was created by a different version of Mercurial\n"
)


def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
      to truncate each file. The file should contain a list of
      file\0offset pairs, delimited by newlines. The corresponding
      '*.backupfiles' file should contain a list of file\0backupfile
      pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determines whether file stat ambiguity should be avoided when
    restoring the corresponding files.
    """
    entries = []
    backupentries = []

    with opener.open(file) as fp:
        lines = fp.readlines()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o)))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        with opener.open(backupjournal) as fp:
            lines = fp.readlines()
        if lines:
            ver = lines[0][:-1]
            if ver != (b'%d' % version):
                report(BAD_VERSION_MSG)
            else:
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split(b'\0')
                        backupentries.append((l, f, b, bool(c)))

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
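

# A minimal recovery sketch (illustrative; `repo` is assumed to be a local
# repository object, and the vfsmap keys mirror how callers typically map
# the store and plain vfs):
#
#   vfsmap = {b'': repo.svfs, b'plain': repo.vfs}
#   rollback(repo.svfs, vfsmap, b'journal', repo.ui.warn)
#
# This replays the 'journal' and 'journal.backupfiles' records: appended
# data is truncated away and backed-up files are restored.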