transaction: do not rely on a global variable to post_finalize file...
marmoute
r49534:21ac6aed default
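
The change below makes bookmark writing declare its post-finalize ordering
explicitly: instead of transaction.py consulting a global variable listing
which file generators must run after the transaction is finalized (as the
commit message describes), each caller now passes post_finalize=True to
tr.addfilegenerator(). A minimal sketch of the new registration pattern,
mirroring the hunk below; the write_bookmarks callback is a stand-in:

    # The caller opts in explicitly when registering its generator.
    tr.addfilegenerator(
        b'bookmarks',         # unique id for this generator
        (b'bookmarks',),      # filenames the generator produces
        write_bookmarks,      # callback receiving the open file object
        location=b'plain',    # b'' = store vfs, b'plain' = .hg/ vfs
        post_finalize=True,   # write only after the transaction finalizes
    )
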
@@ -1,1118 +1,1122 b''
1 1 # Mercurial bookmark support code
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import struct
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 bin,
16 16 hex,
17 17 short,
18 18 )
19 19 from .pycompat import getattr
20 20 from . import (
21 21 encoding,
22 22 error,
23 23 obsutil,
24 24 pycompat,
25 25 requirements,
26 26 scmutil,
27 27 txnutil,
28 28 util,
29 29 )
30 30 from .utils import (
31 31 urlutil,
32 32 )
33 33
34 34 # label constants
35 35 # until 3.5, bookmarks.current was the advertised name, not
36 36 # bookmarks.active, so we must use both to avoid breaking old
37 37 # custom styles
38 38 activebookmarklabel = b'bookmarks.active bookmarks.current'
39 39
40 40
41 41 def bookmarksinstore(repo):
42 42 return requirements.BOOKMARKS_IN_STORE_REQUIREMENT in repo.requirements
43 43
44 44
45 45 def bookmarksvfs(repo):
46 46 return repo.svfs if bookmarksinstore(repo) else repo.vfs
47 47
48 48
49 49 def _getbkfile(repo):
50 50 """Hook so that extensions that mess with the store can hook bm storage.
51 51
52 52 For core, this just handles whether we should see pending
53 53 bookmarks or the committed ones. Other extensions (like share)
54 54 may need to tweak this behavior further.
55 55 """
56 56 fp, pending = txnutil.trypending(
57 57 repo.root, bookmarksvfs(repo), b'bookmarks'
58 58 )
59 59 return fp
60 60
61 61
62 62 class bmstore(object):
63 63 r"""Storage for bookmarks.
64 64
65 65 This object should do all bookmark-related reads and writes, so
66 66 that it's fairly simple to replace the storage underlying
67 67 bookmarks without having to clone the logic surrounding
68 68 bookmarks. This type also should manage the active bookmark, if
69 69 any.
70 70
71 71 This particular bmstore implementation stores bookmarks as
72 72 {hash}\s{name}\n (the same format as localtags) in
73 73 .hg/bookmarks. The mapping is stored as {name: nodeid}.
74 74 """
75 75
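    # An illustration of the on-disk layout described above (node values
    # are made up): one "<40-hex-node> <name>\n" entry per bookmark, e.g.
    #
    #     deadbeefdeadbeefdeadbeefdeadbeefdeadbeef my-bookmark
    #     0123456789abcdef0123456789abcdef01234567 feature/wip
    #
    # _write() below emits exactly this shape via hex(node) and the
    # encoding.fromlocal()-converted name.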
76 76 def __init__(self, repo):
77 77 self._repo = repo
78 78 self._refmap = refmap = {} # refspec: node
79 79 self._nodemap = nodemap = {} # node: sorted([refspec, ...])
80 80 self._clean = True
81 81 self._aclean = True
82 82 has_node = repo.changelog.index.has_node
83 83 tonode = bin # force local lookup
84 84 try:
85 85 with _getbkfile(repo) as bkfile:
86 86 for line in bkfile:
87 87 line = line.strip()
88 88 if not line:
89 89 continue
90 90 try:
91 91 sha, refspec = line.split(b' ', 1)
92 92 node = tonode(sha)
93 93 if has_node(node):
94 94 refspec = encoding.tolocal(refspec)
95 95 refmap[refspec] = node
96 96 nrefs = nodemap.get(node)
97 97 if nrefs is None:
98 98 nodemap[node] = [refspec]
99 99 else:
100 100 nrefs.append(refspec)
101 101 if nrefs[-2] > refspec:
102 102 # bookmarks weren't sorted before 4.5
103 103 nrefs.sort()
104 104 except (TypeError, ValueError):
105 105 # TypeError:
106 106 # - bin(...)
107 107 # ValueError:
108 108 # - node in nm, for non-20-bytes entry
109 109 # - split(...), for string without ' '
110 110 bookmarkspath = b'.hg/bookmarks'
111 111 if bookmarksinstore(repo):
112 112 bookmarkspath = b'.hg/store/bookmarks'
113 113 repo.ui.warn(
114 114 _(b'malformed line in %s: %r\n')
115 115 % (bookmarkspath, pycompat.bytestr(line))
116 116 )
117 117 except IOError as inst:
118 118 if inst.errno != errno.ENOENT:
119 119 raise
120 120 self._active = _readactive(repo, self)
121 121
122 122 @property
123 123 def active(self):
124 124 return self._active
125 125
126 126 @active.setter
127 127 def active(self, mark):
128 128 if mark is not None and mark not in self._refmap:
129 129 raise AssertionError(b'bookmark %s does not exist!' % mark)
130 130
131 131 self._active = mark
132 132 self._aclean = False
133 133
134 134 def __len__(self):
135 135 return len(self._refmap)
136 136
137 137 def __iter__(self):
138 138 return iter(self._refmap)
139 139
140 140 def iteritems(self):
141 141 return pycompat.iteritems(self._refmap)
142 142
143 143 def items(self):
144 144 return self._refmap.items()
145 145
146 146 # TODO: maybe rename to allnames()?
147 147 def keys(self):
148 148 return self._refmap.keys()
149 149
150 150 # TODO: maybe rename to allnodes()? but nodes would have to be deduplicated
151 151 # could be self._nodemap.keys()
152 152 def values(self):
153 153 return self._refmap.values()
154 154
155 155 def __contains__(self, mark):
156 156 return mark in self._refmap
157 157
158 158 def __getitem__(self, mark):
159 159 return self._refmap[mark]
160 160
161 161 def get(self, mark, default=None):
162 162 return self._refmap.get(mark, default)
163 163
164 164 def _set(self, mark, node):
165 165 self._clean = False
166 166 if mark in self._refmap:
167 167 self._del(mark)
168 168 self._refmap[mark] = node
169 169 nrefs = self._nodemap.get(node)
170 170 if nrefs is None:
171 171 self._nodemap[node] = [mark]
172 172 else:
173 173 nrefs.append(mark)
174 174 nrefs.sort()
175 175
176 176 def _del(self, mark):
177 177 if mark not in self._refmap:
178 178 return
179 179 self._clean = False
180 180 node = self._refmap.pop(mark)
181 181 nrefs = self._nodemap[node]
182 182 if len(nrefs) == 1:
183 183 assert nrefs[0] == mark
184 184 del self._nodemap[node]
185 185 else:
186 186 nrefs.remove(mark)
187 187
188 188 def names(self, node):
189 189 """Return a sorted list of bookmarks pointing to the specified node"""
190 190 return self._nodemap.get(node, [])
191 191
192 192 def applychanges(self, repo, tr, changes):
193 193 """Apply a list of changes to bookmarks"""
194 194 bmchanges = tr.changes.get(b'bookmarks')
195 195 for name, node in changes:
196 196 old = self._refmap.get(name)
197 197 if node is None:
198 198 self._del(name)
199 199 else:
200 200 self._set(name, node)
201 201 if bmchanges is not None:
202 202 # if a previous value exists, preserve the "initial" value
203 203 previous = bmchanges.get(name)
204 204 if previous is not None:
205 205 old = previous[0]
206 206 bmchanges[name] = (old, node)
207 207 self._recordchange(tr)
208 208
209 209 def _recordchange(self, tr):
210 210 """record that bookmarks have been changed in a transaction
211 211
212 212 The transaction is then responsible for updating the file content."""
213 213 location = b'' if bookmarksinstore(self._repo) else b'plain'
214 214 tr.addfilegenerator(
215 b'bookmarks', (b'bookmarks',), self._write, location=location
215 b'bookmarks',
216 (b'bookmarks',),
217 self._write,
218 location=location,
219 post_finalize=True,
216 220 )
217 221 tr.hookargs[b'bookmark_moved'] = b'1'
218 222
219 223 def _writerepo(self, repo):
220 224 """Factored out for extensibility"""
221 225 rbm = repo._bookmarks
222 226 if rbm.active not in self._refmap:
223 227 rbm.active = None
224 228 rbm._writeactive()
225 229
226 230 if bookmarksinstore(repo):
227 231 vfs = repo.svfs
228 232 lock = repo.lock()
229 233 else:
230 234 vfs = repo.vfs
231 235 lock = repo.wlock()
232 236 with lock:
233 237 with vfs(b'bookmarks', b'w', atomictemp=True, checkambig=True) as f:
234 238 self._write(f)
235 239
236 240 def _writeactive(self):
237 241 if self._aclean:
238 242 return
239 243 with self._repo.wlock():
240 244 if self._active is not None:
241 245 with self._repo.vfs(
242 246 b'bookmarks.current', b'w', atomictemp=True, checkambig=True
243 247 ) as f:
244 248 f.write(encoding.fromlocal(self._active))
245 249 else:
246 250 self._repo.vfs.tryunlink(b'bookmarks.current')
247 251 self._aclean = True
248 252
249 253 def _write(self, fp):
250 254 for name, node in sorted(pycompat.iteritems(self._refmap)):
251 255 fp.write(b"%s %s\n" % (hex(node), encoding.fromlocal(name)))
252 256 self._clean = True
253 257 self._repo.invalidatevolatilesets()
254 258
255 259 def expandname(self, bname):
256 260 if bname == b'.':
257 261 if self.active:
258 262 return self.active
259 263 else:
260 264 raise error.RepoLookupError(_(b"no active bookmark"))
261 265 return bname
262 266
263 267 def checkconflict(self, mark, force=False, target=None):
264 268 """check repo for a potential clash of mark with an existing bookmark,
265 269 branch, or hash
266 270
267 271 If target is supplied, then check that we are moving the bookmark
268 272 forward.
269 273
270 274 If force is supplied, then forcibly move the bookmark to a new commit
271 275 regardless if it is a move forward.
272 276
273 277 If divergent bookmarks are to be deleted, they will be returned as a list.
274 278 """
275 279 cur = self._repo[b'.'].node()
276 280 if mark in self._refmap and not force:
277 281 if target:
278 282 if self._refmap[mark] == target and target == cur:
279 283 # re-activating a bookmark
280 284 return []
281 285 rev = self._repo[target].rev()
282 286 anc = self._repo.changelog.ancestors([rev])
283 287 bmctx = self._repo[self[mark]]
284 288 divs = [
285 289 self._refmap[b]
286 290 for b in self._refmap
287 291 if b.split(b'@', 1)[0] == mark.split(b'@', 1)[0]
288 292 ]
289 293
290 294 # allow resolving a single divergent bookmark even if moving
291 295 # the bookmark across branches when a revision is specified
292 296 # that contains a divergent bookmark
293 297 if bmctx.rev() not in anc and target in divs:
294 298 return divergent2delete(self._repo, [target], mark)
295 299
296 300 deletefrom = [
297 301 b for b in divs if self._repo[b].rev() in anc or b == target
298 302 ]
299 303 delbms = divergent2delete(self._repo, deletefrom, mark)
300 304 if validdest(self._repo, bmctx, self._repo[target]):
301 305 self._repo.ui.status(
302 306 _(b"moving bookmark '%s' forward from %s\n")
303 307 % (mark, short(bmctx.node()))
304 308 )
305 309 return delbms
306 310 raise error.Abort(
307 311 _(b"bookmark '%s' already exists (use -f to force)") % mark
308 312 )
309 313 if (
310 314 mark in self._repo.branchmap()
311 315 or mark == self._repo.dirstate.branch()
312 316 ) and not force:
313 317 raise error.Abort(
314 318 _(b"a bookmark cannot have the name of an existing branch")
315 319 )
316 320 if len(mark) > 3 and not force:
317 321 try:
318 322 shadowhash = scmutil.isrevsymbol(self._repo, mark)
319 323 except error.LookupError: # ambiguous identifier
320 324 shadowhash = False
321 325 if shadowhash:
322 326 self._repo.ui.warn(
323 327 _(
324 328 b"bookmark %s matches a changeset hash\n"
325 329 b"(did you leave a -r out of an 'hg bookmark' "
326 330 b"command?)\n"
327 331 )
328 332 % mark
329 333 )
330 334 return []
331 335
332 336
333 337 def _readactive(repo, marks):
334 338 """
335 339 Get the active bookmark. We can have an active bookmark that updates
336 340 itself as we commit. This function returns the name of that bookmark.
337 341 It is stored in .hg/bookmarks.current
338 342 """
339 343 # No readline() in osutil.posixfile, reading everything is
340 344 # cheap.
341 345 content = repo.vfs.tryread(b'bookmarks.current')
342 346 mark = encoding.tolocal((content.splitlines() or [b''])[0])
343 347 if mark == b'' or mark not in marks:
344 348 mark = None
345 349 return mark
346 350
347 351
348 352 def activate(repo, mark):
349 353 """
350 354 Set the given bookmark to be 'active', meaning that this bookmark will
351 355 follow new commits that are made.
352 356 The name is recorded in .hg/bookmarks.current
353 357 """
354 358 repo._bookmarks.active = mark
355 359 repo._bookmarks._writeactive()
356 360
357 361
358 362 def deactivate(repo):
359 363 """
360 364 Unset the active bookmark in this repository.
361 365 """
362 366 repo._bookmarks.active = None
363 367 repo._bookmarks._writeactive()
364 368
365 369
366 370 def isactivewdirparent(repo):
367 371 """
368 372 Tell whether the 'active' bookmark (the one that follows new commits)
369 373 points to one of the parents of the current working directory (wdir).
370 374
371 375 While this is normally the case, it can on occasion be false; for example,
372 376 immediately after a pull, the active bookmark can be moved to point
373 377 to a place different than the wdir. This is solved by running `hg update`.
374 378 """
375 379 mark = repo._activebookmark
376 380 marks = repo._bookmarks
377 381 parents = [p.node() for p in repo[None].parents()]
378 382 return mark in marks and marks[mark] in parents
379 383
380 384
381 385 def divergent2delete(repo, deletefrom, bm):
382 386 """find divergent versions of bm on nodes in deletefrom.
383 387
384 388 Return the list of bookmarks to delete."""
385 389 todelete = []
386 390 marks = repo._bookmarks
387 391 divergent = [
388 392 b for b in marks if b.split(b'@', 1)[0] == bm.split(b'@', 1)[0]
389 393 ]
390 394 for mark in divergent:
391 395 if mark == b'@' or b'@' not in mark:
392 396 # can't be divergent by definition
393 397 continue
394 398 if mark and marks[mark] in deletefrom:
395 399 if mark != bm:
396 400 todelete.append(mark)
397 401 return todelete
398 402
399 403
400 404 def headsforactive(repo):
401 405 """Given a repo with an active bookmark, return divergent bookmark nodes.
402 406
403 407 Args:
404 408 repo: A repository with an active bookmark.
405 409
406 410 Returns:
407 411 A list of binary node ids that is the full list of other
408 412 revisions with bookmarks divergent from the active bookmark. If
409 413 there were no divergent bookmarks, then this list will contain
410 414 only one entry.
411 415 """
412 416 if not repo._activebookmark:
413 417 raise ValueError(
414 418 b'headsforactive() only makes sense with an active bookmark'
415 419 )
416 420 name = repo._activebookmark.split(b'@', 1)[0]
417 421 heads = []
418 422 for mark, n in pycompat.iteritems(repo._bookmarks):
419 423 if mark.split(b'@', 1)[0] == name:
420 424 heads.append(n)
421 425 return heads
422 426
423 427
424 428 def calculateupdate(ui, repo):
425 429 """Return a tuple (activemark, movemarkfrom) indicating the active bookmark
426 430 and where to move the active bookmark from, if needed."""
427 431 checkout, movemarkfrom = None, None
428 432 activemark = repo._activebookmark
429 433 if isactivewdirparent(repo):
430 434 movemarkfrom = repo[b'.'].node()
431 435 elif activemark:
432 436 ui.status(_(b"updating to active bookmark %s\n") % activemark)
433 437 checkout = activemark
434 438 return (checkout, movemarkfrom)
435 439
436 440
437 441 def update(repo, parents, node):
438 442 deletefrom = parents
439 443 marks = repo._bookmarks
440 444 active = marks.active
441 445 if not active:
442 446 return False
443 447
444 448 bmchanges = []
445 449 if marks[active] in parents:
446 450 new = repo[node]
447 451 divs = [
448 452 repo[marks[b]]
449 453 for b in marks
450 454 if b.split(b'@', 1)[0] == active.split(b'@', 1)[0]
451 455 ]
452 456 anc = repo.changelog.ancestors([new.rev()])
453 457 deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
454 458 if validdest(repo, repo[marks[active]], new):
455 459 bmchanges.append((active, new.node()))
456 460
457 461 for bm in divergent2delete(repo, deletefrom, active):
458 462 bmchanges.append((bm, None))
459 463
460 464 if bmchanges:
461 465 with repo.lock(), repo.transaction(b'bookmark') as tr:
462 466 marks.applychanges(repo, tr, bmchanges)
463 467 return bool(bmchanges)
464 468
465 469
466 470 def isdivergent(b):
467 471 return b'@' in b and not b.endswith(b'@')
468 472
469 473
470 474 def listbinbookmarks(repo):
471 475 # We may try to list bookmarks on a repo type that does not
472 476 # support it (e.g., statichttprepository).
473 477 marks = getattr(repo, '_bookmarks', {})
474 478
475 479 hasnode = repo.changelog.hasnode
476 480 for k, v in pycompat.iteritems(marks):
477 481 # don't expose local divergent bookmarks
478 482 if hasnode(v) and not isdivergent(k):
479 483 yield k, v
480 484
481 485
482 486 def listbookmarks(repo):
483 487 d = {}
484 488 for book, node in listbinbookmarks(repo):
485 489 d[book] = hex(node)
486 490 return d
487 491
488 492
489 493 def pushbookmark(repo, key, old, new):
490 494 if isdivergent(key):
491 495 return False
492 496 if bookmarksinstore(repo):
493 497 wlock = util.nullcontextmanager()
494 498 else:
495 499 wlock = repo.wlock()
496 500 with wlock, repo.lock(), repo.transaction(b'bookmarks') as tr:
497 501 marks = repo._bookmarks
498 502 existing = hex(marks.get(key, b''))
499 503 if existing != old and existing != new:
500 504 return False
501 505 if new == b'':
502 506 changes = [(key, None)]
503 507 else:
504 508 if new not in repo:
505 509 return False
506 510 changes = [(key, repo[new].node())]
507 511 marks.applychanges(repo, tr, changes)
508 512 return True
509 513
510 514
511 515 def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
512 516 """Compare bookmarks between srcmarks and dstmarks
513 517
514 518 This returns a tuple "(addsrc, adddst, advsrc, advdst, diverge,
515 519 differ, invalid, same)"; each is a list of bookmarks as below:
516 520
517 521 :addsrc: added on src side (removed on dst side, perhaps)
518 522 :adddst: added on dst side (removed on src side, perhaps)
519 523 :advsrc: advanced on src side
520 524 :advdst: advanced on dst side
521 525 :diverge: diverged
522 526 :differ: changed, but changeset referred on src is unknown on dst
523 527 :invalid: unknown on both sides
524 528 :same: same on both sides
525 529
526 530 Each element of the lists in the result tuple is a tuple "(bookmark name,
527 531 changeset ID on source side, changeset ID on destination
528 532 side)". Each changeset ID is a binary node or None.
529 533
530 534 Changeset IDs of tuples in the "addsrc", "adddst", "differ" or
531 535 "invalid" lists may be unknown to the repo.
532 536
533 537 If "targets" is specified, only bookmarks listed in it are
534 538 examined.
535 539 """
536 540
537 541 if targets:
538 542 bset = set(targets)
539 543 else:
540 544 srcmarkset = set(srcmarks)
541 545 dstmarkset = set(dstmarks)
542 546 bset = srcmarkset | dstmarkset
543 547
544 548 results = ([], [], [], [], [], [], [], [])
545 549 addsrc = results[0].append
546 550 adddst = results[1].append
547 551 advsrc = results[2].append
548 552 advdst = results[3].append
549 553 diverge = results[4].append
550 554 differ = results[5].append
551 555 invalid = results[6].append
552 556 same = results[7].append
553 557
554 558 for b in sorted(bset):
555 559 if b not in srcmarks:
556 560 if b in dstmarks:
557 561 adddst((b, None, dstmarks[b]))
558 562 else:
559 563 invalid((b, None, None))
560 564 elif b not in dstmarks:
561 565 addsrc((b, srcmarks[b], None))
562 566 else:
563 567 scid = srcmarks[b]
564 568 dcid = dstmarks[b]
565 569 if scid == dcid:
566 570 same((b, scid, dcid))
567 571 elif scid in repo and dcid in repo:
568 572 sctx = repo[scid]
569 573 dctx = repo[dcid]
570 574 if sctx.rev() < dctx.rev():
571 575 if validdest(repo, sctx, dctx):
572 576 advdst((b, scid, dcid))
573 577 else:
574 578 diverge((b, scid, dcid))
575 579 else:
576 580 if validdest(repo, dctx, sctx):
577 581 advsrc((b, scid, dcid))
578 582 else:
579 583 diverge((b, scid, dcid))
580 584 else:
581 585 # it is too expensive to examine in detail, in this case
582 586 differ((b, scid, dcid))
583 587
584 588 return results
585 589
586 590
587 591 def _diverge(ui, b, path, localmarks, remotenode):
588 592 """Return appropriate diverged bookmark for specified ``path``
589 593
590 594 This returns None if it fails to assign any divergent
591 595 bookmark name.
592 596
593 597 This reuses an already existing one with an "@number" suffix, if it
594 598 refers to ``remotenode``.
595 599 """
596 600 if b == b'@':
597 601 b = b''
598 602 # try to use an @pathalias suffix
599 603 # if an @pathalias already exists, we overwrite (update) it
600 604 if path.startswith(b"file:"):
601 605 path = urlutil.url(path).path
602 606 for name, p in urlutil.list_paths(ui):
603 607 loc = p.rawloc
604 608 if loc.startswith(b"file:"):
605 609 loc = urlutil.url(loc).path
606 610 if path == loc:
607 611 return b'%s@%s' % (b, name)
608 612
609 613 # otherwise, assign a new unique "@number" suffix
610 614 for x in range(1, 100):
611 615 n = b'%s@%d' % (b, x)
612 616 if n not in localmarks or localmarks[n] == remotenode:
613 617 return n
614 618
615 619 return None
616 620
617 621
618 622 def unhexlifybookmarks(marks):
619 623 binremotemarks = {}
620 624 for name, node in marks.items():
621 625 binremotemarks[name] = bin(node)
622 626 return binremotemarks
623 627
624 628
625 629 _binaryentry = struct.Struct(b'>20sH')
626 630
627 631
628 632 def binaryencode(repo, bookmarks):
629 633 """encode a '(bookmark, node)' iterable into a binary stream
630 634
631 635 the binary format is:
632 636
633 637 <node><bookmark-length><bookmark-name>
634 638
635 639 :node: is a 20 bytes binary node,
636 640 :bookmark-length: an unsigned short,
637 641 :bookmark-name: the name of the bookmark (of length <bookmark-length>)
638 642
639 643 wdirid (all bits set) will be used as a special value for "missing"
640 644 """
641 645 binarydata = []
642 646 for book, node in bookmarks:
643 647 if not node: # None or ''
644 648 node = repo.nodeconstants.wdirid
645 649 binarydata.append(_binaryentry.pack(node, len(book)))
646 650 binarydata.append(book)
647 651 return b''.join(binarydata)
648 652
649 653
650 654 def binarydecode(repo, stream):
651 655 """decode a binary stream into a '(bookmark, node)' iterable
652 656
653 657 the binary format is:
654 658
655 659 <node><bookmark-length><bookmark-name>
656 660
657 661 :node: is a 20 bytes binary node,
658 662 :bookmark-length: an unsigned short,
659 663 :bookmark-name: the name of the bookmark (of length <bookmark-length>)
660 664
661 665 wdirid (all bits set) will be used as a special value for "missing"
662 666 """
663 667 entrysize = _binaryentry.size
664 668 books = []
665 669 while True:
666 670 entry = stream.read(entrysize)
667 671 if len(entry) < entrysize:
668 672 if entry:
669 673 raise error.Abort(_(b'bad bookmark stream'))
670 674 break
671 675 node, length = _binaryentry.unpack(entry)
672 676 bookmark = stream.read(length)
673 677 if len(bookmark) < length:
674 678 if entry:
675 679 raise error.Abort(_(b'bad bookmark stream'))
676 680 if node == repo.nodeconstants.wdirid:
677 681 node = None
678 682 books.append((bookmark, node))
679 683 return books
680 684
681 685
682 686 def mirroring_remote(ui, repo, remotemarks):
683 687 """computes the bookmark changes that set the local bookmarks to
684 688 remotemarks"""
685 689 changed = []
686 690 localmarks = repo._bookmarks
687 691 for (b, id) in pycompat.iteritems(remotemarks):
688 692 if id != localmarks.get(b, None) and id in repo:
689 693 changed.append((b, id, ui.debug, _(b"updating bookmark %s\n") % b))
690 694 for b in localmarks:
691 695 if b not in remotemarks:
692 696 changed.append(
693 697 (b, None, ui.debug, _(b"removing bookmark %s\n") % b)
694 698 )
695 699 return changed
696 700
697 701
698 702 def merging_from_remote(ui, repo, remotemarks, path, explicit=()):
699 703 """computes the bookmark changes that merge remote bookmarks into the
700 704 local bookmarks, based on comparebookmarks"""
701 705 localmarks = repo._bookmarks
702 706 (
703 707 addsrc,
704 708 adddst,
705 709 advsrc,
706 710 advdst,
707 711 diverge,
708 712 differ,
709 713 invalid,
710 714 same,
711 715 ) = comparebookmarks(repo, remotemarks, localmarks)
712 716
713 717 status = ui.status
714 718 warn = ui.warn
715 719 if ui.configbool(b'ui', b'quietbookmarkmove'):
716 720 status = warn = ui.debug
717 721
718 722 explicit = set(explicit)
719 723 changed = []
720 724 for b, scid, dcid in addsrc:
721 725 if scid in repo: # add remote bookmarks for changes we already have
722 726 changed.append(
723 727 (b, scid, status, _(b"adding remote bookmark %s\n") % b)
724 728 )
725 729 elif b in explicit:
726 730 explicit.remove(b)
727 731 ui.warn(
728 732 _(b"remote bookmark %s points to locally missing %s\n")
729 733 % (b, hex(scid)[:12])
730 734 )
731 735
732 736 for b, scid, dcid in advsrc:
733 737 changed.append((b, scid, status, _(b"updating bookmark %s\n") % b))
734 738 # remove normal movement from explicit set
735 739 explicit.difference_update(d[0] for d in changed)
736 740
737 741 for b, scid, dcid in diverge:
738 742 if b in explicit:
739 743 explicit.discard(b)
740 744 changed.append((b, scid, status, _(b"importing bookmark %s\n") % b))
741 745 else:
742 746 db = _diverge(ui, b, path, localmarks, scid)
743 747 if db:
744 748 changed.append(
745 749 (
746 750 db,
747 751 scid,
748 752 warn,
749 753 _(b"divergent bookmark %s stored as %s\n") % (b, db),
750 754 )
751 755 )
752 756 else:
753 757 warn(
754 758 _(
755 759 b"warning: failed to assign numbered name "
756 760 b"to divergent bookmark %s\n"
757 761 )
758 762 % b
759 763 )
760 764 for b, scid, dcid in adddst + advdst:
761 765 if b in explicit:
762 766 explicit.discard(b)
763 767 changed.append((b, scid, status, _(b"importing bookmark %s\n") % b))
764 768 for b, scid, dcid in differ:
765 769 if b in explicit:
766 770 explicit.remove(b)
767 771 ui.warn(
768 772 _(b"remote bookmark %s points to locally missing %s\n")
769 773 % (b, hex(scid)[:12])
770 774 )
771 775 return changed
772 776
773 777
774 778 def updatefromremote(
775 779 ui, repo, remotemarks, path, trfunc, explicit=(), mode=None
776 780 ):
777 781 if mode == b'ignore':
778 782 # This should move to a higher level to avoid fetching bookmarks at all
779 783 return
780 784 ui.debug(b"checking for updated bookmarks\n")
781 785 if mode == b'mirror':
782 786 changed = mirroring_remote(ui, repo, remotemarks)
783 787 else:
784 788 changed = merging_from_remote(ui, repo, remotemarks, path, explicit)
785 789
786 790 if changed:
787 791 tr = trfunc()
788 792 changes = []
789 793 key = lambda t: (t[0], t[1] or b'')
790 794 for b, node, writer, msg in sorted(changed, key=key):
791 795 changes.append((b, node))
792 796 writer(msg)
793 797 repo._bookmarks.applychanges(repo, tr, changes)
794 798
795 799
796 800 def incoming(ui, repo, peer, mode=None):
797 801 """Show bookmarks incoming from other to repo"""
798 802 if mode == b'ignore':
799 803 ui.status(_(b"bookmarks exchange disabled with this path\n"))
800 804 return 0
801 805 ui.status(_(b"searching for changed bookmarks\n"))
802 806
803 807 with peer.commandexecutor() as e:
804 808 remotemarks = unhexlifybookmarks(
805 809 e.callcommand(
806 810 b'listkeys',
807 811 {
808 812 b'namespace': b'bookmarks',
809 813 },
810 814 ).result()
811 815 )
812 816
813 817 incomings = []
814 818 if ui.debugflag:
815 819 getid = lambda id: id
816 820 else:
817 821 getid = lambda id: id[:12]
818 822 if ui.verbose:
819 823
820 824 def add(b, id, st):
821 825 incomings.append(b" %-25s %s %s\n" % (b, getid(id), st))
822 826
823 827 else:
824 828
825 829 def add(b, id, st):
826 830 incomings.append(b" %-25s %s\n" % (b, getid(id)))
827 831
828 832 if mode == b'mirror':
829 833 localmarks = repo._bookmarks
830 834 allmarks = set(remotemarks.keys()) | set(localmarks.keys())
831 835 for b in sorted(allmarks):
832 836 loc = localmarks.get(b)
833 837 rem = remotemarks.get(b)
834 838 if loc == rem:
835 839 continue
836 840 elif loc is None:
837 841 add(b, hex(rem), _(b'added'))
838 842 elif rem is None:
839 843 add(b, hex(repo.nullid), _(b'removed'))
840 844 else:
841 845 add(b, hex(rem), _(b'changed'))
842 846 else:
843 847 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
844 848 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
845 849
846 850 for b, scid, dcid in addsrc:
847 851 # i18n: "added" refers to a bookmark
848 852 add(b, hex(scid), _(b'added'))
849 853 for b, scid, dcid in advsrc:
850 854 # i18n: "advanced" refers to a bookmark
851 855 add(b, hex(scid), _(b'advanced'))
852 856 for b, scid, dcid in diverge:
853 857 # i18n: "diverged" refers to a bookmark
854 858 add(b, hex(scid), _(b'diverged'))
855 859 for b, scid, dcid in differ:
856 860 # i18n: "changed" refers to a bookmark
857 861 add(b, hex(scid), _(b'changed'))
858 862
859 863 if not incomings:
860 864 ui.status(_(b"no changed bookmarks found\n"))
861 865 return 1
862 866
863 867 for s in sorted(incomings):
864 868 ui.write(s)
865 869
866 870 return 0
867 871
868 872
869 873 def outgoing(ui, repo, other):
870 874 """Show bookmarks outgoing from repo to other"""
871 875 ui.status(_(b"searching for changed bookmarks\n"))
872 876
873 877 remotemarks = unhexlifybookmarks(other.listkeys(b'bookmarks'))
874 878 r = comparebookmarks(repo, repo._bookmarks, remotemarks)
875 879 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
876 880
877 881 outgoings = []
878 882 if ui.debugflag:
879 883 getid = lambda id: id
880 884 else:
881 885 getid = lambda id: id[:12]
882 886 if ui.verbose:
883 887
884 888 def add(b, id, st):
885 889 outgoings.append(b" %-25s %s %s\n" % (b, getid(id), st))
886 890
887 891 else:
888 892
889 893 def add(b, id, st):
890 894 outgoings.append(b" %-25s %s\n" % (b, getid(id)))
891 895
892 896 for b, scid, dcid in addsrc:
893 897 # i18n: "added" refers to a bookmark
894 898 add(b, hex(scid), _(b'added'))
895 899 for b, scid, dcid in adddst:
896 900 # i18n: "deleted" refers to a bookmark
897 901 add(b, b' ' * 40, _(b'deleted'))
898 902 for b, scid, dcid in advsrc:
899 903 # i18n: "advanced" refers to a bookmark
900 904 add(b, hex(scid), _(b'advanced'))
901 905 for b, scid, dcid in diverge:
902 906 # i18n: "diverged" refers to a bookmark
903 907 add(b, hex(scid), _(b'diverged'))
904 908 for b, scid, dcid in differ:
905 909 # i18n: "changed" refers to a bookmark
906 910 add(b, hex(scid), _(b'changed'))
907 911
908 912 if not outgoings:
909 913 ui.status(_(b"no changed bookmarks found\n"))
910 914 return 1
911 915
912 916 for s in sorted(outgoings):
913 917 ui.write(s)
914 918
915 919 return 0
916 920
917 921
918 922 def summary(repo, peer):
919 923 """Compare bookmarks between repo and other for "hg summary" output
920 924
921 925 This returns "(# of incoming, # of outgoing)" tuple.
922 926 """
923 927 with peer.commandexecutor() as e:
924 928 remotemarks = unhexlifybookmarks(
925 929 e.callcommand(
926 930 b'listkeys',
927 931 {
928 932 b'namespace': b'bookmarks',
929 933 },
930 934 ).result()
931 935 )
932 936
933 937 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
934 938 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
935 939 return (len(addsrc), len(adddst))
936 940
937 941
938 942 def validdest(repo, old, new):
939 943 """Is the new bookmark destination a valid update from the old one"""
940 944 repo = repo.unfiltered()
941 945 if old == new:
942 946 # Old == new -> nothing to update.
943 947 return False
944 948 elif not old:
945 949 # old is nullrev, anything is valid.
946 950 # (new != nullrev has been excluded by the previous check)
947 951 return True
948 952 elif repo.obsstore:
949 953 return new.node() in obsutil.foreground(repo, [old.node()])
950 954 else:
951 955 # still an independent clause as it is lazier (and therefore faster)
952 956 return old.isancestorof(new)
953 957
954 958
955 959 def checkformat(repo, mark):
956 960 """return a valid version of a potential bookmark name
957 961
958 962 Raises an abort error if the bookmark name is not valid.
959 963 """
960 964 mark = mark.strip()
961 965 if not mark:
962 966 raise error.InputError(
963 967 _(b"bookmark names cannot consist entirely of whitespace")
964 968 )
965 969 scmutil.checknewlabel(repo, mark, b'bookmark')
966 970 return mark
967 971
968 972
969 973 def delete(repo, tr, names):
970 974 """remove a mark from the bookmark store
971 975
972 976 Raises an abort error if mark does not exist.
973 977 """
974 978 marks = repo._bookmarks
975 979 changes = []
976 980 for mark in names:
977 981 if mark not in marks:
978 982 raise error.InputError(_(b"bookmark '%s' does not exist") % mark)
979 983 if mark == repo._activebookmark:
980 984 deactivate(repo)
981 985 changes.append((mark, None))
982 986 marks.applychanges(repo, tr, changes)
983 987
984 988
985 989 def rename(repo, tr, old, new, force=False, inactive=False):
986 990 """rename a bookmark from old to new
987 991
988 992 If force is specified, then the new name can overwrite an existing
989 993 bookmark.
990 994
991 995 If inactive is specified, then do not activate the new bookmark.
992 996
993 997 Raises an abort error if old is not in the bookmark store.
994 998 """
995 999 marks = repo._bookmarks
996 1000 mark = checkformat(repo, new)
997 1001 if old not in marks:
998 1002 raise error.InputError(_(b"bookmark '%s' does not exist") % old)
999 1003 changes = []
1000 1004 for bm in marks.checkconflict(mark, force):
1001 1005 changes.append((bm, None))
1002 1006 changes.extend([(mark, marks[old]), (old, None)])
1003 1007 marks.applychanges(repo, tr, changes)
1004 1008 if repo._activebookmark == old and not inactive:
1005 1009 activate(repo, mark)
1006 1010
1007 1011
1008 1012 def addbookmarks(repo, tr, names, rev=None, force=False, inactive=False):
1009 1013 """add a list of bookmarks
1010 1014
1011 1015 If force is specified, then the new name can overwrite an existing
1012 1016 bookmark.
1013 1017
1014 1018 If inactive is specified, then do not activate any bookmark. Otherwise, the
1015 1019 first bookmark is activated.
1016 1020
1017 1021 Raises an abort error if a bookmark name is invalid or conflicts with an existing one.
1018 1022 """
1019 1023 marks = repo._bookmarks
1020 1024 cur = repo[b'.'].node()
1021 1025 newact = None
1022 1026 changes = []
1023 1027
1024 1028 # unhide revs if any
1025 1029 if rev:
1026 1030 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
1027 1031
1028 1032 ctx = scmutil.revsingle(repo, rev, None)
1029 1033 # bookmarking wdir means creating a bookmark on p1 and activating it
1030 1034 activatenew = not inactive and ctx.rev() is None
1031 1035 if ctx.node() is None:
1032 1036 ctx = ctx.p1()
1033 1037 tgt = ctx.node()
1034 1038 assert tgt
1035 1039
1036 1040 for mark in names:
1037 1041 mark = checkformat(repo, mark)
1038 1042 if newact is None:
1039 1043 newact = mark
1040 1044 if inactive and mark == repo._activebookmark:
1041 1045 deactivate(repo)
1042 1046 continue
1043 1047 for bm in marks.checkconflict(mark, force, tgt):
1044 1048 changes.append((bm, None))
1045 1049 changes.append((mark, tgt))
1046 1050
1047 1051 # nothing changed but for the one deactivated above
1048 1052 if not changes:
1049 1053 return
1050 1054
1051 1055 if ctx.hidden():
1052 1056 repo.ui.warn(_(b"bookmarking hidden changeset %s\n") % ctx.hex()[:12])
1053 1057
1054 1058 if ctx.obsolete():
1055 1059 msg = obsutil._getfilteredreason(repo, ctx.hex()[:12], ctx)
1056 1060 repo.ui.warn(b"(%s)\n" % msg)
1057 1061
1058 1062 marks.applychanges(repo, tr, changes)
1059 1063 if activatenew and cur == marks[newact]:
1060 1064 activate(repo, newact)
1061 1065 elif cur != tgt and newact == repo._activebookmark:
1062 1066 deactivate(repo)
1063 1067
1064 1068
1065 1069 def _printbookmarks(ui, repo, fm, bmarks):
1066 1070 """private method to print bookmarks
1067 1071
1068 1072 Provides a way for extensions to control how bookmarks are printed (e.g.
1069 1073 prepend or append text to names)
1070 1074 """
1071 1075 hexfn = fm.hexfunc
1072 1076 if len(bmarks) == 0 and fm.isplain():
1073 1077 ui.status(_(b"no bookmarks set\n"))
1074 1078 for bmark, (n, prefix, label) in sorted(pycompat.iteritems(bmarks)):
1075 1079 fm.startitem()
1076 1080 fm.context(repo=repo)
1077 1081 if not ui.quiet:
1078 1082 fm.plain(b' %s ' % prefix, label=label)
1079 1083 fm.write(b'bookmark', b'%s', bmark, label=label)
1080 1084 pad = b" " * (25 - encoding.colwidth(bmark))
1081 1085 fm.condwrite(
1082 1086 not ui.quiet,
1083 1087 b'rev node',
1084 1088 pad + b' %d:%s',
1085 1089 repo.changelog.rev(n),
1086 1090 hexfn(n),
1087 1091 label=label,
1088 1092 )
1089 1093 fm.data(active=(activebookmarklabel in label))
1090 1094 fm.plain(b'\n')
1091 1095
1092 1096
1093 1097 def printbookmarks(ui, repo, fm, names=None):
1094 1098 """print bookmarks by the given formatter
1095 1099
1096 1100 Provides a way for extensions to control how bookmarks are printed.
1097 1101 """
1098 1102 marks = repo._bookmarks
1099 1103 bmarks = {}
1100 1104 for bmark in names or marks:
1101 1105 if bmark not in marks:
1102 1106 raise error.InputError(_(b"bookmark '%s' does not exist") % bmark)
1103 1107 active = repo._activebookmark
1104 1108 if bmark == active:
1105 1109 prefix, label = b'*', activebookmarklabel
1106 1110 else:
1107 1111 prefix, label = b' ', b''
1108 1112
1109 1113 bmarks[bmark] = (marks[bmark], prefix, label)
1110 1114 _printbookmarks(ui, repo, fm, bmarks)
1111 1115
1112 1116
1113 1117 def preparehookargs(name, old, new):
1114 1118 if new is None:
1115 1119 new = b''
1116 1120 if old is None:
1117 1121 old = b''
1118 1122 return {b'bookmark': name, b'node': hex(new), b'oldnode': hex(old)}
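
A quick round-trip of the binary bookmark wire format documented in
binaryencode()/binarydecode() above, as a self-contained sketch using only
the standard library; the node and name values are invented:

    import struct

    entry = struct.Struct(b'>20sH')  # <node><bookmark-length>, big-endian
    node = b'\x11' * 20              # hypothetical 20-byte binary node
    name = b'feature-x'              # hypothetical bookmark name

    payload = entry.pack(node, len(name)) + name  # <node><length><name>

    got_node, length = entry.unpack(payload[:entry.size])
    got_name = payload[entry.size:entry.size + length]
    assert (got_node, got_name) == (node, name)
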
@@ -1,1477 +1,1481 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15 import uuid
16 16
17 17 from .i18n import _
18 18 from .pycompat import delattr
19 19
20 20 from hgdemandimport import tracing
21 21
22 22 from . import (
23 23 dirstatemap,
24 24 encoding,
25 25 error,
26 26 match as matchmod,
27 27 node,
28 28 pathutil,
29 29 policy,
30 30 pycompat,
31 31 scmutil,
32 32 sparse,
33 33 util,
34 34 )
35 35
36 36 from .dirstateutils import (
37 37 timestamp,
38 38 )
39 39
40 40 from .interfaces import (
41 41 dirstate as intdirstate,
42 42 util as interfaceutil,
43 43 )
44 44
45 45 parsers = policy.importmod('parsers')
46 46 rustmod = policy.importrust('dirstate')
47 47
48 48 HAS_FAST_DIRSTATE_V2 = rustmod is not None
49 49
50 50 propertycache = util.propertycache
51 51 filecache = scmutil.filecache
52 52 _rangemask = dirstatemap.rangemask
53 53
54 54 DirstateItem = dirstatemap.DirstateItem
55 55
56 56
57 57 class repocache(filecache):
58 58 """filecache for files in .hg/"""
59 59
60 60 def join(self, obj, fname):
61 61 return obj._opener.join(fname)
62 62
63 63
64 64 class rootcache(filecache):
65 65 """filecache for files in the repository root"""
66 66
67 67 def join(self, obj, fname):
68 68 return obj._join(fname)
69 69
70 70
71 71 def requires_parents_change(func):
72 72 def wrap(self, *args, **kwargs):
73 73 if not self.pendingparentchange():
74 74 msg = 'calling `%s` outside of a parentchange context'
75 75 msg %= func.__name__
76 76 raise error.ProgrammingError(msg)
77 77 return func(self, *args, **kwargs)
78 78
79 79 return wrap
80 80
81 81
82 82 def requires_no_parents_change(func):
83 83 def wrap(self, *args, **kwargs):
84 84 if self.pendingparentchange():
85 85 msg = 'calling `%s` inside of a parentchange context'
86 86 msg %= func.__name__
87 87 raise error.ProgrammingError(msg)
88 88 return func(self, *args, **kwargs)
89 89
90 90 return wrap
91 91
92 92
93 93 @interfaceutil.implementer(intdirstate.idirstate)
94 94 class dirstate(object):
95 95 def __init__(
96 96 self,
97 97 opener,
98 98 ui,
99 99 root,
100 100 validate,
101 101 sparsematchfn,
102 102 nodeconstants,
103 103 use_dirstate_v2,
104 104 use_tracked_key=False,
105 105 ):
106 106 """Create a new dirstate object.
107 107
108 108 opener is an open()-like callable that can be used to open the
109 109 dirstate file; root is the root of the directory tracked by
110 110 the dirstate.
111 111 """
112 112 self._use_dirstate_v2 = use_dirstate_v2
113 113 self._use_tracked_key = use_tracked_key
114 114 self._nodeconstants = nodeconstants
115 115 self._opener = opener
116 116 self._validate = validate
117 117 self._root = root
118 118 self._sparsematchfn = sparsematchfn
119 119 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is a
120 120 # UNC path pointing to a root share (issue4557)
121 121 self._rootdir = pathutil.normasprefix(root)
122 122 # True if any internal state may be different
123 123 self._dirty = False
124 124 # True if the set of tracked files may be different
125 125 self._dirty_tracked_set = False
126 126 self._ui = ui
127 127 self._filecache = {}
128 128 self._parentwriters = 0
129 129 self._filename = b'dirstate'
130 130 self._filename_tk = b'dirstate-tracked-key'
131 131 self._pendingfilename = b'%s.pending' % self._filename
132 132 self._plchangecallbacks = {}
133 133 self._origpl = None
134 134 self._mapcls = dirstatemap.dirstatemap
135 135 # Access and cache cwd early, so we don't access it for the first time
136 136 # after a working-copy update caused it to not exist (accessing it then
137 137 # raises an exception).
138 138 self._cwd
139 139
140 140 def prefetch_parents(self):
141 141 """make sure the parents are loaded
142 142
143 143 Used to avoid a race condition.
144 144 """
145 145 self._pl
146 146
147 147 @contextlib.contextmanager
148 148 def parentchange(self):
149 149 """Context manager for handling dirstate parents.
150 150
151 151 If an exception occurs in the scope of the context manager,
152 152 the incoherent dirstate won't be written when wlock is
153 153 released.
154 154 """
155 155 self._parentwriters += 1
156 156 yield
157 157 # Typically we want the "undo" step of a context manager in a
158 158 # finally block so it happens even when an exception
159 159 # occurs. In this case, however, we only want to decrement
160 160 # parentwriters if the code in the with statement exits
161 161 # normally, so we don't have a try/finally here on purpose.
162 162 self._parentwriters -= 1
163 163
164 164 def pendingparentchange(self):
165 165 """Returns true if the dirstate is in the middle of a set of changes
166 166 that modify the dirstate parent.
167 167 """
168 168 return self._parentwriters > 0
169 169
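    # A usage sketch for the guard above (dirstate and new_p1 are
    # placeholders): setparents() refuses to run outside the context.
    #
    #     with dirstate.parentchange():
    #         dirstate.setparents(new_p1)    # ok: _parentwriters > 0
    #
    #     dirstate.setparents(new_p1)        # raises ValueError here
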
170 170 @propertycache
171 171 def _map(self):
172 172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 173 self._map = self._mapcls(
174 174 self._ui,
175 175 self._opener,
176 176 self._root,
177 177 self._nodeconstants,
178 178 self._use_dirstate_v2,
179 179 )
180 180 return self._map
181 181
182 182 @property
183 183 def _sparsematcher(self):
184 184 """The matcher for the sparse checkout.
185 185
186 186 The working directory may not include every file from a manifest. The
187 187 matcher obtained by this property will match a path if it is to be
188 188 included in the working directory.
189 189 """
190 190 # TODO there is potential to cache this property. For now, the matcher
191 191 # is resolved on every access. (But the called function does use a
192 192 # cache to keep the lookup fast.)
193 193 return self._sparsematchfn()
194 194
195 195 @repocache(b'branch')
196 196 def _branch(self):
197 197 try:
198 198 return self._opener.read(b"branch").strip() or b"default"
199 199 except IOError as inst:
200 200 if inst.errno != errno.ENOENT:
201 201 raise
202 202 return b"default"
203 203
204 204 @property
205 205 def _pl(self):
206 206 return self._map.parents()
207 207
208 208 def hasdir(self, d):
209 209 return self._map.hastrackeddir(d)
210 210
211 211 @rootcache(b'.hgignore')
212 212 def _ignore(self):
213 213 files = self._ignorefiles()
214 214 if not files:
215 215 return matchmod.never()
216 216
217 217 pats = [b'include:%s' % f for f in files]
218 218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219 219
220 220 @propertycache
221 221 def _slash(self):
222 222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223 223
224 224 @propertycache
225 225 def _checklink(self):
226 226 return util.checklink(self._root)
227 227
228 228 @propertycache
229 229 def _checkexec(self):
230 230 return bool(util.checkexec(self._root))
231 231
232 232 @propertycache
233 233 def _checkcase(self):
234 234 return not util.fscasesensitive(self._join(b'.hg'))
235 235
236 236 def _join(self, f):
237 237 # much faster than os.path.join()
238 238 # it's safe because f is always a relative path
239 239 return self._rootdir + f
240 240
241 241 def flagfunc(self, buildfallback):
242 242 """build a callable that returns flags associated with a filename
243 243
244 244 The information is extracted from three possible layers:
245 245 1. the file system if it supports the information
246 246 2. the "fallback" information stored in the dirstate if any
247 247 3. a more expensive mechanism inferring the flags from the parents.
248 248 """
249 249
250 250 # small hack to cache the result of buildfallback()
251 251 fallback_func = []
252 252
253 253 def get_flags(x):
254 254 entry = None
255 255 fallback_value = None
256 256 try:
257 257 st = os.lstat(self._join(x))
258 258 except OSError:
259 259 return b''
260 260
261 261 if self._checklink:
262 262 if util.statislink(st):
263 263 return b'l'
264 264 else:
265 265 entry = self.get_entry(x)
266 266 if entry.has_fallback_symlink:
267 267 if entry.fallback_symlink:
268 268 return b'l'
269 269 else:
270 270 if not fallback_func:
271 271 fallback_func.append(buildfallback())
272 272 fallback_value = fallback_func[0](x)
273 273 if b'l' in fallback_value:
274 274 return b'l'
275 275
276 276 if self._checkexec:
277 277 if util.statisexec(st):
278 278 return b'x'
279 279 else:
280 280 if entry is None:
281 281 entry = self.get_entry(x)
282 282 if entry.has_fallback_exec:
283 283 if entry.fallback_exec:
284 284 return b'x'
285 285 else:
286 286 if fallback_value is None:
287 287 if not fallback_func:
288 288 fallback_func.append(buildfallback())
289 289 fallback_value = fallback_func[0](x)
290 290 if b'x' in fallback_value:
291 291 return b'x'
292 292 return b''
293 293
294 294 return get_flags
295 295
296 296 @propertycache
297 297 def _cwd(self):
298 298 # internal config: ui.forcecwd
299 299 forcecwd = self._ui.config(b'ui', b'forcecwd')
300 300 if forcecwd:
301 301 return forcecwd
302 302 return encoding.getcwd()
303 303
304 304 def getcwd(self):
305 305 """Return the path from which a canonical path is calculated.
306 306
307 307 This path should be used to resolve file patterns or to convert
308 308 canonical paths back to file paths for display. It shouldn't be
309 309 used to get real file paths. Use vfs functions instead.
310 310 """
311 311 cwd = self._cwd
312 312 if cwd == self._root:
313 313 return b''
314 314 # self._root ends with a path separator if self._root is '/' or 'C:\'
315 315 rootsep = self._root
316 316 if not util.endswithsep(rootsep):
317 317 rootsep += pycompat.ossep
318 318 if cwd.startswith(rootsep):
319 319 return cwd[len(rootsep) :]
320 320 else:
321 321 # we're outside the repo. return an absolute path.
322 322 return cwd
323 323
324 324 def pathto(self, f, cwd=None):
325 325 if cwd is None:
326 326 cwd = self.getcwd()
327 327 path = util.pathto(self._root, cwd, f)
328 328 if self._slash:
329 329 return util.pconvert(path)
330 330 return path
331 331
332 332 def get_entry(self, path):
333 333 """return a DirstateItem for the associated path"""
334 334 entry = self._map.get(path)
335 335 if entry is None:
336 336 return DirstateItem()
337 337 return entry
338 338
339 339 def __contains__(self, key):
340 340 return key in self._map
341 341
342 342 def __iter__(self):
343 343 return iter(sorted(self._map))
344 344
345 345 def items(self):
346 346 return pycompat.iteritems(self._map)
347 347
348 348 iteritems = items
349 349
350 350 def parents(self):
351 351 return [self._validate(p) for p in self._pl]
352 352
353 353 def p1(self):
354 354 return self._validate(self._pl[0])
355 355
356 356 def p2(self):
357 357 return self._validate(self._pl[1])
358 358
359 359 @property
360 360 def in_merge(self):
361 361 """True if a merge is in progress"""
362 362 return self._pl[1] != self._nodeconstants.nullid
363 363
364 364 def branch(self):
365 365 return encoding.tolocal(self._branch)
366 366
367 367 def setparents(self, p1, p2=None):
368 368 """Set dirstate parents to p1 and p2.
369 369
370 370 When moving from two parents to one, "merged" entries are
371 371 adjusted to normal, and previous copy records are discarded and
372 372 returned by the call.
373 373
374 374 See localrepo.setparents()
375 375 """
376 376 if p2 is None:
377 377 p2 = self._nodeconstants.nullid
378 378 if self._parentwriters == 0:
379 379 raise ValueError(
380 380 b"cannot set dirstate parent outside of "
381 381 b"dirstate.parentchange context manager"
382 382 )
383 383
384 384 self._dirty = True
385 385 oldp2 = self._pl[1]
386 386 if self._origpl is None:
387 387 self._origpl = self._pl
388 388 nullid = self._nodeconstants.nullid
389 389 # True if we need to fold p2 related state back to a linear case
390 390 fold_p2 = oldp2 != nullid and p2 == nullid
391 391 return self._map.setparents(p1, p2, fold_p2=fold_p2)
392 392
393 393 def setbranch(self, branch):
394 394 self.__class__._branch.set(self, encoding.fromlocal(branch))
395 395 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
396 396 try:
397 397 f.write(self._branch + b'\n')
398 398 f.close()
399 399
400 400 # make sure filecache has the correct stat info for _branch after
401 401 # replacing the underlying file
402 402 ce = self._filecache[b'_branch']
403 403 if ce:
404 404 ce.refresh()
405 405 except: # re-raises
406 406 f.discard()
407 407 raise
408 408
409 409 def invalidate(self):
410 410 """Causes the next access to reread the dirstate.
411 411
412 412 This is different from localrepo.invalidatedirstate() because it always
413 413 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
414 414 check whether the dirstate has changed before rereading it."""
415 415
416 416 for a in ("_map", "_branch", "_ignore"):
417 417 if a in self.__dict__:
418 418 delattr(self, a)
419 419 self._dirty = False
420 420 self._dirty_tracked_set = False
421 421 self._parentwriters = 0
422 422 self._origpl = None
423 423
424 424 def copy(self, source, dest):
425 425 """Mark dest as a copy of source. Unmark dest if source is None."""
426 426 if source == dest:
427 427 return
428 428 self._dirty = True
429 429 if source is not None:
430 430 self._map.copymap[dest] = source
431 431 else:
432 432 self._map.copymap.pop(dest, None)
433 433
434 434 def copied(self, file):
435 435 return self._map.copymap.get(file, None)
436 436
437 437 def copies(self):
438 438 return self._map.copymap
439 439
440 440 @requires_no_parents_change
441 441 def set_tracked(self, filename, reset_copy=False):
442 442 """a "public" method for generic code to mark a file as tracked
443 443
444 444 This function is to be called outside of "update/merge" case. For
445 445 example by a command like `hg add X`.
446 446
447 447 if reset_copy is set, any existing copy information will be dropped.
448 448
449 449 return True if the file was previously untracked, False otherwise.
450 450 """
451 451 self._dirty = True
452 452 entry = self._map.get(filename)
453 453 if entry is None or not entry.tracked:
454 454 self._check_new_tracked_filename(filename)
455 455 pre_tracked = self._map.set_tracked(filename)
456 456 if reset_copy:
457 457 self._map.copymap.pop(filename, None)
458 458 if pre_tracked:
459 459 self._dirty_tracked_set = True
460 460 return pre_tracked
461 461
462 462 @requires_no_parents_change
463 463 def set_untracked(self, filename):
464 464 """a "public" method for generic code to mark a file as untracked
465 465
466 466 This function is to be called outside of "update/merge" case. For
467 467 example by a command like `hg remove X`.
468 468
469 469 return True if the file was previously tracked, False otherwise.
470 470 """
471 471 ret = self._map.set_untracked(filename)
472 472 if ret:
473 473 self._dirty = True
474 474 self._dirty_tracked_set = True
475 475 return ret
476 476
477 477 @requires_no_parents_change
478 478 def set_clean(self, filename, parentfiledata):
479 479 """record that the current state of the file on disk is known to be clean"""
480 480 self._dirty = True
481 481 if not self._map[filename].tracked:
482 482 self._check_new_tracked_filename(filename)
483 483 (mode, size, mtime) = parentfiledata
484 484 self._map.set_clean(filename, mode, size, mtime)
485 485
486 486 @requires_no_parents_change
487 487 def set_possibly_dirty(self, filename):
488 488 """record that the current state of the file on disk is unknown"""
489 489 self._dirty = True
490 490 self._map.set_possibly_dirty(filename)
491 491
492 492 @requires_parents_change
493 493 def update_file_p1(
494 494 self,
495 495 filename,
496 496 p1_tracked,
497 497 ):
498 498 """Set a file as tracked in the parent (or not)
499 499
500 500 This is to be called when adjusting the dirstate to a new parent after a
501 501 history rewriting operation.
502 502
503 503 It should not be called during a merge (p2 != nullid) and only within
504 504 a `with dirstate.parentchange():` context.
505 505 """
506 506 if self.in_merge:
507 507 msg = b'update_file_reference should not be called when merging'
508 508 raise error.ProgrammingError(msg)
509 509 entry = self._map.get(filename)
510 510 if entry is None:
511 511 wc_tracked = False
512 512 else:
513 513 wc_tracked = entry.tracked
514 514 if not (p1_tracked or wc_tracked):
515 515 # the file is no longer relevant to anyone
516 516 if self._map.get(filename) is not None:
517 517 self._map.reset_state(filename)
518 518 self._dirty = True
519 519 elif (not p1_tracked) and wc_tracked:
520 520 if entry is not None and entry.added:
521 521 return # avoid dropping copy information (maybe?)
522 522
523 523 self._map.reset_state(
524 524 filename,
525 525 wc_tracked,
526 526 p1_tracked,
527 527 # the underlying reference might have changed, we will have to
528 528 # check it.
529 529 has_meaningful_mtime=False,
530 530 )
531 531
532 532 @requires_parents_change
533 533 def update_file(
534 534 self,
535 535 filename,
536 536 wc_tracked,
537 537 p1_tracked,
538 538 p2_info=False,
539 539 possibly_dirty=False,
540 540 parentfiledata=None,
541 541 ):
542 542 """update the information about a file in the dirstate
543 543
544 544 This is to be called when the dirstate's parent changes, to keep track
545 545 of the file's situation with regard to the working copy and its parent.
546 546
547 547 This function must be called within a `dirstate.parentchange` context.
548 548
549 549 note: the API is at an early stage and we might need to adjust it
550 550 depending on what information ends up being relevant and useful to
551 551 other processing.
552 552 """
553 553
554 554 # note: I do not think we need to double check name clash here since we
555 555 # are in an update/merge case that should already have taken care of
556 556 # this. The test agrees
557 557
558 558 self._dirty = True
559 559 old_entry = self._map.get(filename)
560 560 if old_entry is None:
561 561 prev_tracked = False
562 562 else:
563 563 prev_tracked = old_entry.tracked
564 564 if prev_tracked != wc_tracked:
565 565 self._dirty_tracked_set = True
566 566
567 567 self._map.reset_state(
568 568 filename,
569 569 wc_tracked,
570 570 p1_tracked,
571 571 p2_info=p2_info,
572 572 has_meaningful_mtime=not possibly_dirty,
573 573 parentfiledata=parentfiledata,
574 574 )
575 575
576 576 def _check_new_tracked_filename(self, filename):
577 577 scmutil.checkfilename(filename)
578 578 if self._map.hastrackeddir(filename):
579 579 msg = _(b'directory %r already in dirstate')
580 580 msg %= pycompat.bytestr(filename)
581 581 raise error.Abort(msg)
582 582 # the new file must not shadow a tracked file at any of its parent paths
583 583 for d in pathutil.finddirs(filename):
584 584 if self._map.hastrackeddir(d):
585 585 break
586 586 entry = self._map.get(d)
587 587 if entry is not None and not entry.removed:
588 588 msg = _(b'file %r in dirstate clashes with %r')
589 589 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
590 590 raise error.Abort(msg)
591 591
592 592 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
593 593 if exists is None:
594 594 exists = os.path.lexists(os.path.join(self._root, path))
595 595 if not exists:
596 596 # Maybe a path component exists
597 597 if not ignoremissing and b'/' in path:
598 598 d, f = path.rsplit(b'/', 1)
599 599 d = self._normalize(d, False, ignoremissing, None)
600 600 folded = d + b"/" + f
601 601 else:
602 602 # No path components, preserve original case
603 603 folded = path
604 604 else:
605 605 # recursively normalize leading directory components
606 606 # against dirstate
607 607 if b'/' in normed:
608 608 d, f = normed.rsplit(b'/', 1)
609 609 d = self._normalize(d, False, ignoremissing, True)
610 610 r = self._root + b"/" + d
611 611 folded = d + b"/" + util.fspath(f, r)
612 612 else:
613 613 folded = util.fspath(normed, self._root)
614 614 storemap[normed] = folded
615 615
616 616 return folded
617 617
618 618 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
619 619 normed = util.normcase(path)
620 620 folded = self._map.filefoldmap.get(normed, None)
621 621 if folded is None:
622 622 if isknown:
623 623 folded = path
624 624 else:
625 625 folded = self._discoverpath(
626 626 path, normed, ignoremissing, exists, self._map.filefoldmap
627 627 )
628 628 return folded
629 629
630 630 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
631 631 normed = util.normcase(path)
632 632 folded = self._map.filefoldmap.get(normed, None)
633 633 if folded is None:
634 634 folded = self._map.dirfoldmap.get(normed, None)
635 635 if folded is None:
636 636 if isknown:
637 637 folded = path
638 638 else:
639 639 # store discovered result in dirfoldmap so that future
640 640 # normalizefile calls don't start matching directories
641 641 folded = self._discoverpath(
642 642 path, normed, ignoremissing, exists, self._map.dirfoldmap
643 643 )
644 644 return folded
645 645
646 646 def normalize(self, path, isknown=False, ignoremissing=False):
647 647 """
648 648 normalize the case of a pathname when on a casefolding filesystem
649 649
650 650 isknown specifies whether the filename came from walking the
651 651 disk, to avoid extra filesystem access.
652 652
653 653 If ignoremissing is True, missing paths are returned
654 654 unchanged. Otherwise, we try harder to normalize possibly
655 655 existing path components.
656 656
657 657 The normalized case is determined based on the following precedence:
658 658
659 659 - version of name already stored in the dirstate
660 660 - version of name stored on disk
661 661 - version provided via command arguments
662 662 """
663 663
664 664 if self._checkcase:
665 665 return self._normalize(path, isknown, ignoremissing)
666 666 return path
667 667
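The three-level precedence in the docstring above can be shown with a minimal, self-contained toy; the real implementation consults `filefoldmap`/`dirfoldmap` and `util.fspath`, so this is only an illustration of the ordering:

def toy_normalize(path, in_dirstate, on_disk):
    # 1. a spelling already stored in the dirstate wins
    # 2. otherwise use the spelling found on disk
    # 3. otherwise keep the spelling provided by the caller
    lowered = path.lower()
    for name in in_dirstate:
        if name.lower() == lowered:
            return name
    for name in on_disk:
        if name.lower() == lowered:
            return name
    return path

assert toy_normalize('README.TXT', ['ReadMe.txt'], ['readme.txt']) == 'ReadMe.txt'
assert toy_normalize('README.TXT', [], ['readme.txt']) == 'readme.txt'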
668 668 def clear(self):
669 669 self._map.clear()
670 670 self._dirty = True
671 671
672 672 def rebuild(self, parent, allfiles, changedfiles=None):
673 673 if changedfiles is None:
674 674 # Rebuild entire dirstate
675 675 to_lookup = allfiles
676 676 to_drop = []
677 677 self.clear()
678 678 elif len(changedfiles) < 10:
679 679 # Avoid turning allfiles into a set, which can be expensive if it's
680 680 # large.
681 681 to_lookup = []
682 682 to_drop = []
683 683 for f in changedfiles:
684 684 if f in allfiles:
685 685 to_lookup.append(f)
686 686 else:
687 687 to_drop.append(f)
688 688 else:
689 689 changedfilesset = set(changedfiles)
690 690 to_lookup = changedfilesset & set(allfiles)
691 691 to_drop = changedfilesset - to_lookup
692 692
693 693 if self._origpl is None:
694 694 self._origpl = self._pl
695 695 self._map.setparents(parent, self._nodeconstants.nullid)
696 696
697 697 for f in to_lookup:
698 698
699 699 if self.in_merge:
700 700 self.set_tracked(f)
701 701 else:
702 702 self._map.reset_state(
703 703 f,
704 704 wc_tracked=True,
705 705 p1_tracked=True,
706 706 )
707 707 for f in to_drop:
708 708 self._map.reset_state(f)
709 709
710 710 self._dirty = True
711 711
712 712 def identity(self):
713 713 """Return identity of dirstate itself to detect changing in storage
714 714
715 715 If identity of previous dirstate is equal to this, writing
716 716 changes based on the former dirstate out can keep consistency.
717 717 """
718 718 return self._map.identity
719 719
720 720 def write(self, tr):
721 721 if not self._dirty:
722 722 return
723 723
724 724 write_key = self._use_tracked_key and self._dirty_tracked_set
725 725 if tr:
726 726 # delay writing in-memory changes out
727 727 if write_key:
728 728 tr.addfilegenerator(
729 729 b'dirstate-0-key-pre',
730 730 (self._filename_tk,),
731 731 lambda f: self._write_tracked_key(tr, f),
732 732 location=b'plain',
733 post_finalize=True,
733 734 )
734 735 tr.addfilegenerator(
735 736 b'dirstate-1-main',
736 737 (self._filename,),
737 738 lambda f: self._writedirstate(tr, f),
738 739 location=b'plain',
740 post_finalize=True,
739 741 )
740 742 if write_key:
741 743 tr.addfilegenerator(
742 744 b'dirstate-2-key-post',
743 745 (self._filename_tk,),
744 746 lambda f: self._write_tracked_key(tr, f),
745 747 location=b'plain',
748 post_finalize=True,
746 749 )
747 750 return
748 751
749 752 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
750 753 if write_key:
751 754 # we change the key-file before changing the dirstate to make sure
752 755 # readers invalidate their cache before we start writing
753 756 with file(self._filename_tk) as f:
754 757 self._write_tracked_key(tr, f)
755 758 with file(self._filename) as f:
756 759 self._writedirstate(tr, f)
757 760 if write_key:
758 761 # we update the key-file after writing to make sure readers have a
759 762 # key that matches the newly written content
760 763 with file(self._filename_tk) as f:
761 764 self._write_tracked_key(tr, f)
762 765
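The non-transactional branch above writes the tracked-key file on both sides of the dirstate write. A self-contained toy of that bracketing, under the assumption that readers cache parsed dirstate content keyed by the key file:

import uuid

def write_bracketed(store, new_payload):
    # `store` stands in for the key file and the dirstate file on disk.
    # Fresh key first: a reader that cached content under the old key
    # sees a mismatch and drops its cache before the data changes.
    store['key'] = uuid.uuid4().hex
    store['data'] = new_payload
    # Fresh key last: readers now cache the new content under a key that
    # is guaranteed to correspond to fully written data.
    store['key'] = uuid.uuid4().hex

store = {'key': 'initial', 'data': ''}
write_bracketed(store, 'new dirstate payload')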
763 766 def addparentchangecallback(self, category, callback):
764 767 """add a callback to be called when the wd parents are changed
765 768
766 769 Callback will be called with the following arguments:
767 770 dirstate, (oldp1, oldp2), (newp1, newp2)
768 771
769 772 Category is a unique identifier to allow overwriting an old callback
770 773 with a newer callback.
771 774 """
772 775 self._plchangecallbacks[category] = callback
773 776
774 777 def _writedirstate(self, tr, st):
775 778 # notify callbacks about parents change
776 779 if self._origpl is not None and self._origpl != self._pl:
777 780 for c, callback in sorted(
778 781 pycompat.iteritems(self._plchangecallbacks)
779 782 ):
780 783 callback(self, self._origpl, self._pl)
781 784 self._origpl = None
782 785 self._map.write(tr, st)
783 786 self._dirty = False
784 787 self._dirty_tracked_set = False
785 788
786 789 def _write_tracked_key(self, tr, f):
787 790 key = node.hex(uuid.uuid4().bytes)
788 791 f.write(b"1\n%s\n" % key) # 1 is the format version
789 792
790 793 def _dirignore(self, f):
791 794 if self._ignore(f):
792 795 return True
793 796 for p in pathutil.finddirs(f):
794 797 if self._ignore(p):
795 798 return True
796 799 return False
797 800
798 801 def _ignorefiles(self):
799 802 files = []
800 803 if os.path.exists(self._join(b'.hgignore')):
801 804 files.append(self._join(b'.hgignore'))
802 805 for name, path in self._ui.configitems(b"ui"):
803 806 if name == b'ignore' or name.startswith(b'ignore.'):
804 807 # we need to use os.path.join here rather than self._join
805 808 # because path is arbitrary and user-specified
806 809 files.append(os.path.join(self._rootdir, util.expandpath(path)))
807 810 return files
808 811
809 812 def _ignorefileandline(self, f):
810 813 files = collections.deque(self._ignorefiles())
811 814 visited = set()
812 815 while files:
813 816 i = files.popleft()
814 817 patterns = matchmod.readpatternfile(
815 818 i, self._ui.warn, sourceinfo=True
816 819 )
817 820 for pattern, lineno, line in patterns:
818 821 kind, p = matchmod._patsplit(pattern, b'glob')
819 822 if kind == b"subinclude":
820 823 if p not in visited:
821 824 files.append(p)
822 825 continue
823 826 m = matchmod.match(
824 827 self._root, b'', [], [pattern], warn=self._ui.warn
825 828 )
826 829 if m(f):
827 830 return (i, lineno, line)
828 831 visited.add(i)
829 832 return (None, -1, b"")
830 833
831 834 def _walkexplicit(self, match, subrepos):
832 835 """Get stat data about the files explicitly specified by match.
833 836
834 837 Return a triple (results, dirsfound, dirsnotfound).
835 838 - results is a mapping from filename to stat result. It also contains
836 839 listings mapping subrepos and .hg to None.
837 840 - dirsfound is a list of files found to be directories.
838 841 - dirsnotfound is a list of files that the dirstate thinks are
839 842 directories and that were not found."""
840 843
841 844 def badtype(mode):
842 845 kind = _(b'unknown')
843 846 if stat.S_ISCHR(mode):
844 847 kind = _(b'character device')
845 848 elif stat.S_ISBLK(mode):
846 849 kind = _(b'block device')
847 850 elif stat.S_ISFIFO(mode):
848 851 kind = _(b'fifo')
849 852 elif stat.S_ISSOCK(mode):
850 853 kind = _(b'socket')
851 854 elif stat.S_ISDIR(mode):
852 855 kind = _(b'directory')
853 856 return _(b'unsupported file type (type is %s)') % kind
854 857
855 858 badfn = match.bad
856 859 dmap = self._map
857 860 lstat = os.lstat
858 861 getkind = stat.S_IFMT
859 862 dirkind = stat.S_IFDIR
860 863 regkind = stat.S_IFREG
861 864 lnkkind = stat.S_IFLNK
862 865 join = self._join
863 866 dirsfound = []
864 867 foundadd = dirsfound.append
865 868 dirsnotfound = []
866 869 notfoundadd = dirsnotfound.append
867 870
868 871 if not match.isexact() and self._checkcase:
869 872 normalize = self._normalize
870 873 else:
871 874 normalize = None
872 875
873 876 files = sorted(match.files())
874 877 subrepos.sort()
875 878 i, j = 0, 0
876 879 while i < len(files) and j < len(subrepos):
877 880 subpath = subrepos[j] + b"/"
878 881 if files[i] < subpath:
879 882 i += 1
880 883 continue
881 884 while i < len(files) and files[i].startswith(subpath):
882 885 del files[i]
883 886 j += 1
884 887
885 888 if not files or b'' in files:
886 889 files = [b'']
887 890 # constructing the foldmap is expensive, so don't do it for the
888 891 # common case where files is ['']
889 892 normalize = None
890 893 results = dict.fromkeys(subrepos)
891 894 results[b'.hg'] = None
892 895
893 896 for ff in files:
894 897 if normalize:
895 898 nf = normalize(ff, False, True)
896 899 else:
897 900 nf = ff
898 901 if nf in results:
899 902 continue
900 903
901 904 try:
902 905 st = lstat(join(nf))
903 906 kind = getkind(st.st_mode)
904 907 if kind == dirkind:
905 908 if nf in dmap:
906 909 # file replaced by dir on disk but still in dirstate
907 910 results[nf] = None
908 911 foundadd((nf, ff))
909 912 elif kind == regkind or kind == lnkkind:
910 913 results[nf] = st
911 914 else:
912 915 badfn(ff, badtype(kind))
913 916 if nf in dmap:
914 917 results[nf] = None
915 918 except OSError as inst: # nf not found on disk - it is dirstate only
916 919 if nf in dmap: # does it exactly match a missing file?
917 920 results[nf] = None
918 921 else: # does it match a missing directory?
919 922 if self._map.hasdir(nf):
920 923 notfoundadd(nf)
921 924 else:
922 925 badfn(ff, encoding.strtolocal(inst.strerror))
923 926
924 927 # match.files() may contain explicitly-specified paths that shouldn't
925 928 # be taken; drop them from the list of files found. dirsfound/notfound
926 929 # aren't filtered here because they will be tested later.
927 930 if match.anypats():
928 931 for f in list(results):
929 932 if f == b'.hg' or f in subrepos:
930 933 # keep sentinel to disable further out-of-repo walks
931 934 continue
932 935 if not match(f):
933 936 del results[f]
934 937
935 938 # Case insensitive filesystems cannot rely on lstat() failing to detect
936 939 # a case-only rename. Prune the stat object for any file that does not
937 940 # match the case in the filesystem, if there are multiple files that
938 941 # normalize to the same path.
939 942 if match.isexact() and self._checkcase:
940 943 normed = {}
941 944
942 945 for f, st in pycompat.iteritems(results):
943 946 if st is None:
944 947 continue
945 948
946 949 nc = util.normcase(f)
947 950 paths = normed.get(nc)
948 951
949 952 if paths is None:
950 953 paths = set()
951 954 normed[nc] = paths
952 955
953 956 paths.add(f)
954 957
955 958 for norm, paths in pycompat.iteritems(normed):
956 959 if len(paths) > 1:
957 960 for path in paths:
958 961 folded = self._discoverpath(
959 962 path, norm, True, None, self._map.dirfoldmap
960 963 )
961 964 if path != folded:
962 965 results[path] = None
963 966
964 967 return results, dirsfound, dirsnotfound
965 968
966 969 def walk(self, match, subrepos, unknown, ignored, full=True):
967 970 """
968 971 Walk recursively through the directory tree, finding all files
969 972 matched by match.
970 973
971 974 If full is False, maybe skip some known-clean files.
972 975
973 976 Return a dict mapping filename to stat-like object (either
974 977 mercurial.osutil.stat instance or return value of os.stat()).
975 978
976 979 """
977 980 # full is a flag that extensions that hook into walk can use -- this
978 981 # implementation doesn't use it at all. This satisfies the contract
979 982 # because we only guarantee a "maybe".
980 983
981 984 if ignored:
982 985 ignore = util.never
983 986 dirignore = util.never
984 987 elif unknown:
985 988 ignore = self._ignore
986 989 dirignore = self._dirignore
987 990 else:
988 991 # if not unknown and not ignored, drop dir recursion and step 2
989 992 ignore = util.always
990 993 dirignore = util.always
991 994
992 995 matchfn = match.matchfn
993 996 matchalways = match.always()
994 997 matchtdir = match.traversedir
995 998 dmap = self._map
996 999 listdir = util.listdir
997 1000 lstat = os.lstat
998 1001 dirkind = stat.S_IFDIR
999 1002 regkind = stat.S_IFREG
1000 1003 lnkkind = stat.S_IFLNK
1001 1004 join = self._join
1002 1005
1003 1006 exact = skipstep3 = False
1004 1007 if match.isexact(): # match.exact
1005 1008 exact = True
1006 1009 dirignore = util.always # skip step 2
1007 1010 elif match.prefix(): # match.match, no patterns
1008 1011 skipstep3 = True
1009 1012
1010 1013 if not exact and self._checkcase:
1011 1014 normalize = self._normalize
1012 1015 normalizefile = self._normalizefile
1013 1016 skipstep3 = False
1014 1017 else:
1015 1018 normalize = self._normalize
1016 1019 normalizefile = None
1017 1020
1018 1021 # step 1: find all explicit files
1019 1022 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1020 1023 if matchtdir:
1021 1024 for d in work:
1022 1025 matchtdir(d[0])
1023 1026 for d in dirsnotfound:
1024 1027 matchtdir(d)
1025 1028
1026 1029 skipstep3 = skipstep3 and not (work or dirsnotfound)
1027 1030 work = [d for d in work if not dirignore(d[0])]
1028 1031
1029 1032 # step 2: visit subdirectories
1030 1033 def traverse(work, alreadynormed):
1031 1034 wadd = work.append
1032 1035 while work:
1033 1036 tracing.counter('dirstate.walk work', len(work))
1034 1037 nd = work.pop()
1035 1038 visitentries = match.visitchildrenset(nd)
1036 1039 if not visitentries:
1037 1040 continue
1038 1041 if visitentries == b'this' or visitentries == b'all':
1039 1042 visitentries = None
1040 1043 skip = None
1041 1044 if nd != b'':
1042 1045 skip = b'.hg'
1043 1046 try:
1044 1047 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1045 1048 entries = listdir(join(nd), stat=True, skip=skip)
1046 1049 except OSError as inst:
1047 1050 if inst.errno in (errno.EACCES, errno.ENOENT):
1048 1051 match.bad(
1049 1052 self.pathto(nd), encoding.strtolocal(inst.strerror)
1050 1053 )
1051 1054 continue
1052 1055 raise
1053 1056 for f, kind, st in entries:
1054 1057 # Some matchers may return files in the visitentries set,
1055 1058 # instead of 'this', if the matcher explicitly mentions them
1056 1059 # and is not an exactmatcher. This is acceptable; we do not
1057 1060 # make any hard assumptions about file-or-directory below
1058 1061 # based on the presence of `f` in visitentries. If
1059 1062 # visitchildrenset returned a set, we can always skip the
1060 1063 # entries *not* in the set it provided regardless of whether
1061 1064 # they're actually a file or a directory.
1062 1065 if visitentries and f not in visitentries:
1063 1066 continue
1064 1067 if normalizefile:
1065 1068 # even though f might be a directory, we're only
1066 1069 # interested in comparing it to files currently in the
1067 1070 # dmap -- therefore normalizefile is enough
1068 1071 nf = normalizefile(
1069 1072 nd and (nd + b"/" + f) or f, True, True
1070 1073 )
1071 1074 else:
1072 1075 nf = nd and (nd + b"/" + f) or f
1073 1076 if nf not in results:
1074 1077 if kind == dirkind:
1075 1078 if not ignore(nf):
1076 1079 if matchtdir:
1077 1080 matchtdir(nf)
1078 1081 wadd(nf)
1079 1082 if nf in dmap and (matchalways or matchfn(nf)):
1080 1083 results[nf] = None
1081 1084 elif kind == regkind or kind == lnkkind:
1082 1085 if nf in dmap:
1083 1086 if matchalways or matchfn(nf):
1084 1087 results[nf] = st
1085 1088 elif (matchalways or matchfn(nf)) and not ignore(
1086 1089 nf
1087 1090 ):
1088 1091 # unknown file -- normalize if necessary
1089 1092 if not alreadynormed:
1090 1093 nf = normalize(nf, False, True)
1091 1094 results[nf] = st
1092 1095 elif nf in dmap and (matchalways or matchfn(nf)):
1093 1096 results[nf] = None
1094 1097
1095 1098 for nd, d in work:
1096 1099 # alreadynormed means that processwork doesn't have to do any
1097 1100 # expensive directory normalization
1098 1101 alreadynormed = not normalize or nd == d
1099 1102 traverse([d], alreadynormed)
1100 1103
1101 1104 for s in subrepos:
1102 1105 del results[s]
1103 1106 del results[b'.hg']
1104 1107
1105 1108 # step 3: visit remaining files from dmap
1106 1109 if not skipstep3 and not exact:
1107 1110 # If a dmap file is not in results yet, it was either
1108 1111 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1109 1112 # symlink directory.
1110 1113 if not results and matchalways:
1111 1114 visit = [f for f in dmap]
1112 1115 else:
1113 1116 visit = [f for f in dmap if f not in results and matchfn(f)]
1114 1117 visit.sort()
1115 1118
1116 1119 if unknown:
1117 1120 # unknown == True means we walked all dirs under the roots
1118 1121 # that wasn't ignored, and everything that matched was stat'ed
1119 1122 # and is already in results.
1120 1123 # The rest must thus be ignored or under a symlink.
1121 1124 audit_path = pathutil.pathauditor(self._root, cached=True)
1122 1125
1123 1126 for nf in iter(visit):
1124 1127 # If a stat for the same file was already added with a
1125 1128 # different case, don't add one for this, since that would
1126 1129 # make it appear as if the file exists under both names
1127 1130 # on disk.
1128 1131 if (
1129 1132 normalizefile
1130 1133 and normalizefile(nf, True, True) in results
1131 1134 ):
1132 1135 results[nf] = None
1133 1136 # Report ignored items in the dmap as long as they are not
1134 1137 # under a symlink directory.
1135 1138 elif audit_path.check(nf):
1136 1139 try:
1137 1140 results[nf] = lstat(join(nf))
1138 1141 # file was just ignored, no links, and exists
1139 1142 except OSError:
1140 1143 # file doesn't exist
1141 1144 results[nf] = None
1142 1145 else:
1143 1146 # It's either missing or under a symlink directory
1144 1147 # which we in this case report as missing
1145 1148 results[nf] = None
1146 1149 else:
1147 1150 # We may not have walked the full directory tree above,
1148 1151 # so stat and check everything we missed.
1149 1152 iv = iter(visit)
1150 1153 for st in util.statfiles([join(i) for i in visit]):
1151 1154 results[next(iv)] = st
1152 1155 return results
1153 1156
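A hedged sketch of consuming `walk()`, assuming `ds` is a dirstate and `match` a matcher; per the contract above, each filename maps to a stat-like object, or to None when the dirstate knows the file but it is absent on disk:

def split_walk_results(ds, match, subrepos=()):
    found, missing = [], []
    for fn, st in ds.walk(match, subrepos, unknown=True, ignored=False).items():
        (found if st is not None else missing).append(fn)
    return found, missing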
1154 1157 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1155 1158 # Force Rayon (Rust parallelism library) to respect the number of
1156 1159 # workers. This is a temporary workaround until Rust code knows
1157 1160 # how to read the config file.
1158 1161 numcpus = self._ui.configint(b"worker", b"numcpus")
1159 1162 if numcpus is not None:
1160 1163 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1161 1164
1162 1165 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1163 1166 if not workers_enabled:
1164 1167 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1165 1168
1166 1169 (
1167 1170 lookup,
1168 1171 modified,
1169 1172 added,
1170 1173 removed,
1171 1174 deleted,
1172 1175 clean,
1173 1176 ignored,
1174 1177 unknown,
1175 1178 warnings,
1176 1179 bad,
1177 1180 traversed,
1178 1181 dirty,
1179 1182 ) = rustmod.status(
1180 1183 self._map._map,
1181 1184 matcher,
1182 1185 self._rootdir,
1183 1186 self._ignorefiles(),
1184 1187 self._checkexec,
1185 1188 bool(list_clean),
1186 1189 bool(list_ignored),
1187 1190 bool(list_unknown),
1188 1191 bool(matcher.traversedir),
1189 1192 )
1190 1193
1191 1194 self._dirty |= dirty
1192 1195
1193 1196 if matcher.traversedir:
1194 1197 for dir in traversed:
1195 1198 matcher.traversedir(dir)
1196 1199
1197 1200 if self._ui.warn:
1198 1201 for item in warnings:
1199 1202 if isinstance(item, tuple):
1200 1203 file_path, syntax = item
1201 1204 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1202 1205 file_path,
1203 1206 syntax,
1204 1207 )
1205 1208 self._ui.warn(msg)
1206 1209 else:
1207 1210 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1208 1211 self._ui.warn(
1209 1212 msg
1210 1213 % (
1211 1214 pathutil.canonpath(
1212 1215 self._rootdir, self._rootdir, item
1213 1216 ),
1214 1217 b"No such file or directory",
1215 1218 )
1216 1219 )
1217 1220
1218 1221 for (fn, message) in bad:
1219 1222 matcher.bad(fn, encoding.strtolocal(message))
1220 1223
1221 1224 status = scmutil.status(
1222 1225 modified=modified,
1223 1226 added=added,
1224 1227 removed=removed,
1225 1228 deleted=deleted,
1226 1229 unknown=unknown,
1227 1230 ignored=ignored,
1228 1231 clean=clean,
1229 1232 )
1230 1233 return (lookup, status)
1231 1234
1232 1235 def status(self, match, subrepos, ignored, clean, unknown):
1233 1236 """Determine the status of the working copy relative to the
1234 1237 dirstate and return a pair of (unsure, status), where status is of type
1235 1238 scmutil.status and:
1236 1239
1237 1240 unsure:
1238 1241 files that might have been modified since the dirstate was
1239 1242 written, but need to be read to be sure (size is the same
1240 1243 but mtime differs)
1241 1244 status.modified:
1242 1245 files that have definitely been modified since the dirstate
1243 1246 was written (different size or mode)
1244 1247 status.clean:
1245 1248 files that have definitely not been modified since the
1246 1249 dirstate was written
1247 1250 """
1248 1251 listignored, listclean, listunknown = ignored, clean, unknown
1249 1252 lookup, modified, added, unknown, ignored = [], [], [], [], []
1250 1253 removed, deleted, clean = [], [], []
1251 1254
1252 1255 dmap = self._map
1253 1256 dmap.preload()
1254 1257
1255 1258 use_rust = True
1256 1259
1257 1260 allowed_matchers = (
1258 1261 matchmod.alwaysmatcher,
1259 1262 matchmod.exactmatcher,
1260 1263 matchmod.includematcher,
1261 1264 )
1262 1265
1263 1266 if rustmod is None:
1264 1267 use_rust = False
1265 1268 elif self._checkcase:
1266 1269 # Case-insensitive filesystems are not handled yet
1267 1270 use_rust = False
1268 1271 elif subrepos:
1269 1272 use_rust = False
1270 1273 elif sparse.enabled:
1271 1274 use_rust = False
1272 1275 elif not isinstance(match, allowed_matchers):
1273 1276 # Some matchers have yet to be implemented
1274 1277 use_rust = False
1275 1278
1276 1279 # Get the time from the filesystem so we can disambiguate files that
1277 1280 # appear modified in the present or future.
1278 1281 try:
1279 1282 mtime_boundary = timestamp.get_fs_now(self._opener)
1280 1283 except OSError:
1281 1284 # In largefiles or readonly context
1282 1285 mtime_boundary = None
1283 1286
1284 1287 if use_rust:
1285 1288 try:
1286 1289 res = self._rust_status(
1287 1290 match, listclean, listignored, listunknown
1288 1291 )
1289 1292 return res + (mtime_boundary,)
1290 1293 except rustmod.FallbackError:
1291 1294 pass
1292 1295
1293 1296 def noop(f):
1294 1297 pass
1295 1298
1296 1299 dcontains = dmap.__contains__
1297 1300 dget = dmap.__getitem__
1298 1301 ladd = lookup.append # aka "unsure"
1299 1302 madd = modified.append
1300 1303 aadd = added.append
1301 1304 uadd = unknown.append if listunknown else noop
1302 1305 iadd = ignored.append if listignored else noop
1303 1306 radd = removed.append
1304 1307 dadd = deleted.append
1305 1308 cadd = clean.append if listclean else noop
1306 1309 mexact = match.exact
1307 1310 dirignore = self._dirignore
1308 1311 checkexec = self._checkexec
1309 1312 checklink = self._checklink
1310 1313 copymap = self._map.copymap
1311 1314
1312 1315 # We need to do full walks when either
1313 1316 # - we're listing all clean files, or
1314 1317 # - match.traversedir does something, because match.traversedir should
1315 1318 # be called for every dir in the working dir
1316 1319 full = listclean or match.traversedir is not None
1317 1320 for fn, st in pycompat.iteritems(
1318 1321 self.walk(match, subrepos, listunknown, listignored, full=full)
1319 1322 ):
1320 1323 if not dcontains(fn):
1321 1324 if (listignored or mexact(fn)) and dirignore(fn):
1322 1325 if listignored:
1323 1326 iadd(fn)
1324 1327 else:
1325 1328 uadd(fn)
1326 1329 continue
1327 1330
1328 1331 t = dget(fn)
1329 1332 mode = t.mode
1330 1333 size = t.size
1331 1334
1332 1335 if not st and t.tracked:
1333 1336 dadd(fn)
1334 1337 elif t.p2_info:
1335 1338 madd(fn)
1336 1339 elif t.added:
1337 1340 aadd(fn)
1338 1341 elif t.removed:
1339 1342 radd(fn)
1340 1343 elif t.tracked:
1341 1344 if not checklink and t.has_fallback_symlink:
1342 1345 # If the file system does not support symlink, the mode
1343 1346 # might not be correctly stored in the dirstate, so do not
1344 1347 # trust it.
1345 1348 ladd(fn)
1346 1349 elif not checkexec and t.has_fallback_exec:
1347 1350 # If the file system does not support exec bits, the mode
1348 1351 # might not be correctly stored in the dirstate, so do not
1349 1352 # trust it.
1350 1353 ladd(fn)
1351 1354 elif (
1352 1355 size >= 0
1353 1356 and (
1354 1357 (size != st.st_size and size != st.st_size & _rangemask)
1355 1358 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1356 1359 )
1357 1360 or fn in copymap
1358 1361 ):
1359 1362 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1360 1363 # issue6456: Size returned may be longer due to
1361 1364 # encryption on EXT-4 fscrypt, undecided.
1362 1365 ladd(fn)
1363 1366 else:
1364 1367 madd(fn)
1365 1368 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1366 1369 # There might be a change in the future if for example the
1367 1370 # internal clock is off, but this is a case where the issues
1368 1371 # the user would face would be a lot worse and there is
1369 1372 # nothing we can really do.
1370 1373 ladd(fn)
1371 1374 elif listclean:
1372 1375 cadd(fn)
1373 1376 status = scmutil.status(
1374 1377 modified, added, removed, deleted, unknown, ignored, clean
1375 1378 )
1376 1379 return (lookup, status, mtime_boundary)
1377 1380
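A hedged sketch of consuming `status()`, assuming `ds` and `match` exist; files in `unsure` have the recorded size but a differing mtime, so their contents must be read before classifying them:

def summarize_status(ds, match):
    unsure, st, mtime_boundary = ds.status(
        match, subrepos=[], ignored=False, clean=False, unknown=True
    )
    # `st` is a scmutil.status object; `unsure` still needs content reads
    return len(unsure), len(st.modified), len(st.added), len(st.removed)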
1378 1381 def matches(self, match):
1379 1382 """
1380 1383 return files in the dirstate (in whatever state) filtered by match
1381 1384 """
1382 1385 dmap = self._map
1383 1386 if rustmod is not None:
1384 1387 dmap = self._map._map
1385 1388
1386 1389 if match.always():
1387 1390 return dmap.keys()
1388 1391 files = match.files()
1389 1392 if match.isexact():
1390 1393 # fast path -- filter the other way around, since typically files is
1391 1394 # much smaller than dmap
1392 1395 return [f for f in files if f in dmap]
1393 1396 if match.prefix() and all(fn in dmap for fn in files):
1394 1397 # fast path -- all the values are known to be files, so just return
1395 1398 # that
1396 1399 return list(files)
1397 1400 return [f for f in dmap if match(f)]
1398 1401
1399 1402 def _actualfilename(self, tr):
1400 1403 if tr:
1401 1404 return self._pendingfilename
1402 1405 else:
1403 1406 return self._filename
1404 1407
1405 1408 def savebackup(self, tr, backupname):
1406 1409 '''Save current dirstate into backup file'''
1407 1410 filename = self._actualfilename(tr)
1408 1411 assert backupname != filename
1409 1412
1410 1413 # use '_writedirstate' instead of 'write' to make certain changes are
1411 1414 # written out, because the latter skips writing while a transaction runs.
1412 1415 # The output file will be used to create a backup of dirstate at this point.
1413 1416 if self._dirty or not self._opener.exists(filename):
1414 1417 self._writedirstate(
1415 1418 tr,
1416 1419 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1417 1420 )
1418 1421
1419 1422 if tr:
1420 1423 # ensure that subsequent tr.writepending returns True for
1421 1424 # changes written out above, even if dirstate is never
1422 1425 # changed after this
1423 1426 tr.addfilegenerator(
1424 1427 b'dirstate-1-main',
1425 1428 (self._filename,),
1426 1429 lambda f: self._writedirstate(tr, f),
1427 1430 location=b'plain',
1431 post_finalize=True,
1428 1432 )
1429 1433
1430 1434 # ensure that pending file written above is unlinked at
1431 1435 # failure, even if tr.writepending isn't invoked until the
1432 1436 # end of this transaction
1433 1437 tr.registertmp(filename, location=b'plain')
1434 1438
1435 1439 self._opener.tryunlink(backupname)
1436 1440 # hardlink backup is okay because _writedirstate is always called
1437 1441 # with an "atomictemp=True" file.
1438 1442 util.copyfile(
1439 1443 self._opener.join(filename),
1440 1444 self._opener.join(backupname),
1441 1445 hardlink=True,
1442 1446 )
1443 1447
1444 1448 def restorebackup(self, tr, backupname):
1445 1449 '''Restore dirstate by backup file'''
1446 1450 # this "invalidate()" prevents "wlock.release()" from writing
1447 1451 # changes of dirstate out after restoring from backup file
1448 1452 self.invalidate()
1449 1453 filename = self._actualfilename(tr)
1450 1454 o = self._opener
1451 1455 if util.samefile(o.join(backupname), o.join(filename)):
1452 1456 o.unlink(backupname)
1453 1457 else:
1454 1458 o.rename(backupname, filename, checkambig=True)
1455 1459
1456 1460 def clearbackup(self, tr, backupname):
1457 1461 '''Clear backup file'''
1458 1462 self._opener.unlink(backupname)
1459 1463
1460 1464 def verify(self, m1, m2):
1461 1465 """check the dirstate content again the parent manifest and yield errors"""
1462 1466 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1463 1467 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1464 1468 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1465 1469 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1466 1470 for f, entry in self.items():
1467 1471 state = entry.state
1468 1472 if state in b"nr" and f not in m1:
1469 1473 yield (missing_from_p1, f, state)
1470 1474 if state in b"a" and f in m1:
1471 1475 yield (unexpected_in_p1, f, state)
1472 1476 if state in b"m" and f not in m1 and f not in m2:
1473 1477 yield (missing_from_ps, f, state)
1474 1478 for f in m1:
1475 1479 state = self.get_entry(f).state
1476 1480 if state not in b"nrm":
1477 1481 yield (missing_from_ds, f, state)
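`verify()` yields (template, filename, state) tuples rather than formatted messages; a hedged sketch of a consumer, assuming `ds` is a dirstate, `ui` a ui object, and m1/m2 the parent manifests:

def report_dirstate_errors(ui, ds, m1, m2):
    for template, f, state in ds.verify(m1, m2):
        ui.warn(template % (f, state))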
@@ -1,773 +1,772 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import errno
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 error,
21 21 pycompat,
22 22 util,
23 23 )
24 24 from .utils import stringutil
25 25
26 26 version = 2
27 27
28 # These are the file generators that should only be executed after the
29 # finalizers are done, since they rely on the output of the finalizers (like
30 # the changelog having been written).
31 postfinalizegenerators = {
32 b'bookmarks',
33 b'dirstate-0-key-pre',
34 b'dirstate-1-main',
35 b'dirstate-2-key-post',
36 }
37
38 28 GEN_GROUP_ALL = b'all'
39 29 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
40 30 GEN_GROUP_POST_FINALIZE = b'postfinalize'
41 31
42 32
43 33 def active(func):
44 34 def _active(self, *args, **kwds):
45 35 if self._count == 0:
46 36 raise error.ProgrammingError(
47 37 b'cannot use transaction when it is already committed/aborted'
48 38 )
49 39 return func(self, *args, **kwds)
50 40
51 41 return _active
52 42
53 43
54 44 def _playback(
55 45 journal,
56 46 report,
57 47 opener,
58 48 vfsmap,
59 49 entries,
60 50 backupentries,
61 51 unlink=True,
62 52 checkambigfiles=None,
63 53 ):
64 54 for f, o in sorted(dict(entries).items()):
65 55 if o or not unlink:
66 56 checkambig = checkambigfiles and (f, b'') in checkambigfiles
67 57 try:
68 58 fp = opener(f, b'a', checkambig=checkambig)
69 59 if fp.tell() < o:
70 60 raise error.Abort(
71 61 _(
72 62 b"attempted to truncate %s to %d bytes, but it was "
73 63 b"already %d bytes\n"
74 64 )
75 65 % (f, o, fp.tell())
76 66 )
77 67 fp.truncate(o)
78 68 fp.close()
79 69 except IOError:
80 70 report(_(b"failed to truncate %s\n") % f)
81 71 raise
82 72 else:
83 73 try:
84 74 opener.unlink(f)
85 75 except (IOError, OSError) as inst:
86 76 if inst.errno != errno.ENOENT:
87 77 raise
88 78
89 79 backupfiles = []
90 80 for l, f, b, c in backupentries:
91 81 if l not in vfsmap and c:
92 82 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
93 83 vfs = vfsmap[l]
94 84 try:
95 85 if f and b:
96 86 filepath = vfs.join(f)
97 87 backuppath = vfs.join(b)
98 88 checkambig = checkambigfiles and (f, l) in checkambigfiles
99 89 try:
100 90 util.copyfile(backuppath, filepath, checkambig=checkambig)
101 91 backupfiles.append(b)
102 92 except IOError as exc:
103 93 e_msg = stringutil.forcebytestr(exc)
104 94 report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
105 95 else:
106 96 target = f or b
107 97 try:
108 98 vfs.unlink(target)
109 99 except (IOError, OSError) as inst:
110 100 if inst.errno != errno.ENOENT:
111 101 raise
112 102 except (IOError, OSError, error.Abort):
113 103 if not c:
114 104 raise
115 105
116 106 backuppath = b"%s.backupfiles" % journal
117 107 if opener.exists(backuppath):
118 108 opener.unlink(backuppath)
119 109 opener.unlink(journal)
120 110 try:
121 111 for f in backupfiles:
122 112 if opener.exists(f):
123 113 opener.unlink(f)
124 114 except (IOError, OSError, error.Abort):
125 115 # only pure backup files remain; it is safe to ignore any error
126 116 pass
127 117
128 118
129 119 class transaction(util.transactional):
130 120 def __init__(
131 121 self,
132 122 report,
133 123 opener,
134 124 vfsmap,
135 125 journalname,
136 126 undoname=None,
137 127 after=None,
138 128 createmode=None,
139 129 validator=None,
140 130 releasefn=None,
141 131 checkambigfiles=None,
142 132 name='<unnamed>',
143 133 ):
144 134 """Begin a new transaction
145 135
146 136 Begins a new transaction that allows rolling back writes in the event of
147 137 an exception.
148 138
149 139 * `after`: called after the transaction has been committed
150 140 * `createmode`: the mode of the journal file that will be created
151 141 * `releasefn`: called after releasing (with transaction and result)
152 142
153 143 `checkambigfiles` is a set of (path, vfs-location) tuples,
154 144 which determine whether file stat ambiguity should be avoided
155 145 for the corresponding files.
156 146 """
157 147 self._count = 1
158 148 self._usages = 1
159 149 self._report = report
160 150 # a vfs to the store content
161 151 self._opener = opener
162 152 # a map to access file in various {location -> vfs}
163 153 vfsmap = vfsmap.copy()
164 154 vfsmap[b''] = opener # set default value
165 155 self._vfsmap = vfsmap
166 156 self._after = after
167 157 self._offsetmap = {}
168 158 self._newfiles = set()
169 159 self._journal = journalname
170 160 self._undoname = undoname
171 161 self._queue = []
172 162 # A callback to do something just after releasing transaction.
173 163 if releasefn is None:
174 164 releasefn = lambda tr, success: None
175 165 self._releasefn = releasefn
176 166
177 167 self._checkambigfiles = set()
178 168 if checkambigfiles:
179 169 self._checkambigfiles.update(checkambigfiles)
180 170
181 171 self._names = [name]
182 172
183 173 # A dict dedicated to precisely tracking the changes introduced in the
184 174 # transaction.
185 175 self.changes = {}
186 176
187 177 # a dict of arguments to be passed to hooks
188 178 self.hookargs = {}
189 179 self._file = opener.open(self._journal, b"w+")
190 180
191 181 # a list of ('location', 'path', 'backuppath', cache) entries.
192 182 # - if 'backuppath' is empty, no file existed at backup time
193 183 # - if 'path' is empty, this is a temporary transaction file
194 184 # - if 'location' is not empty, the path is outside main opener reach.
195 185 # use 'location' value as a key in a vfsmap to find the right 'vfs'
196 186 # (cache is currently unused)
197 187 self._backupentries = []
198 188 self._backupmap = {}
199 189 self._backupjournal = b"%s.backupfiles" % self._journal
200 190 self._backupsfile = opener.open(self._backupjournal, b'w')
201 191 self._backupsfile.write(b'%d\n' % version)
202 192
203 193 if createmode is not None:
204 194 opener.chmod(self._journal, createmode & 0o666)
205 195 opener.chmod(self._backupjournal, createmode & 0o666)
206 196
207 197 # hold file generations to be performed on commit
208 198 self._filegenerators = {}
209 199 # hold callback to write pending data for hooks
210 200 self._pendingcallback = {}
211 201 # True if any pending data has ever been written
212 202 self._anypending = False
213 203 # holds callback to call when writing the transaction
214 204 self._finalizecallback = {}
215 205 # holds callback to call when validating the transaction
216 206 # should raise exception if anything is wrong
217 207 self._validatecallback = {}
218 208 if validator is not None:
219 209 self._validatecallback[b'001-userhooks'] = validator
220 210 # hold callback for post transaction close
221 211 self._postclosecallback = {}
222 212 # holds callbacks to call during abort
223 213 self._abortcallback = {}
224 214
225 215 def __repr__(self):
226 216 name = '/'.join(self._names)
227 217 return '<transaction name=%s, count=%d, usages=%d>' % (
228 218 name,
229 219 self._count,
230 220 self._usages,
231 221 )
232 222
233 223 def __del__(self):
234 224 if self._journal:
235 225 self._abort()
236 226
237 227 @property
238 228 def finalized(self):
239 229 return self._finalizecallback is None
240 230
241 231 @active
242 232 def startgroup(self):
243 233 """delay registration of file entry
244 234
245 235 This is used by strip to delay vision of strip offset. The transaction
246 236 sees either none or all of the strip actions to be done."""
247 237 self._queue.append([])
248 238
249 239 @active
250 240 def endgroup(self):
251 241 """apply delayed registration of file entry.
252 242
253 243 This is used by strip to delay vision of strip offset. The transaction
254 244 sees either none or all of the strip actions to be done."""
255 245 q = self._queue.pop()
256 246 for f, o in q:
257 247 self._addentry(f, o)
258 248
259 249 @active
260 250 def add(self, file, offset):
261 251 """record the state of an append-only file before update"""
262 252 if (
263 253 file in self._newfiles
264 254 or file in self._offsetmap
265 255 or file in self._backupmap
266 256 ):
267 257 return
268 258 if self._queue:
269 259 self._queue[-1].append((file, offset))
270 260 return
271 261
272 262 self._addentry(file, offset)
273 263
274 264 def _addentry(self, file, offset):
275 265 """add a append-only entry to memory and on-disk state"""
276 266 if (
277 267 file in self._newfiles
278 268 or file in self._offsetmap
279 269 or file in self._backupmap
280 270 ):
281 271 return
282 272 if offset:
283 273 self._offsetmap[file] = offset
284 274 else:
285 275 self._newfiles.add(file)
286 276 # add enough data to the journal to do the truncate
287 277 self._file.write(b"%s\0%d\n" % (file, offset))
288 278 self._file.flush()
289 279
290 280 @active
291 281 def addbackup(self, file, hardlink=True, location=b''):
292 282 """Adds a backup of the file to the transaction
293 283
294 284 Calling addbackup() creates a hardlink backup of the specified file
295 285 that is used to recover the file in the event of the transaction
296 286 aborting.
297 287
298 288 * `file`: the file path, relative to .hg/store
299 289 * `hardlink`: use a hardlink to quickly create the backup
300 290 """
301 291 if self._queue:
302 292 msg = b'cannot use transaction.addbackup inside "group"'
303 293 raise error.ProgrammingError(msg)
304 294
305 295 if (
306 296 file in self._newfiles
307 297 or file in self._offsetmap
308 298 or file in self._backupmap
309 299 ):
310 300 return
311 301 vfs = self._vfsmap[location]
312 302 dirname, filename = vfs.split(file)
313 303 backupfilename = b"%s.backup.%s" % (self._journal, filename)
314 304 backupfile = vfs.reljoin(dirname, backupfilename)
315 305 if vfs.exists(file):
316 306 filepath = vfs.join(file)
317 307 backuppath = vfs.join(backupfile)
318 308 util.copyfile(filepath, backuppath, hardlink=hardlink)
319 309 else:
320 310 backupfile = b''
321 311
322 312 self._addbackupentry((location, file, backupfile, False))
323 313
324 314 def _addbackupentry(self, entry):
325 315 """register a new backup entry and write it to disk"""
326 316 self._backupentries.append(entry)
327 317 self._backupmap[entry[1]] = len(self._backupentries) - 1
328 318 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
329 319 self._backupsfile.flush()
330 320
331 321 @active
332 322 def registertmp(self, tmpfile, location=b''):
333 323 """register a temporary transaction file
334 324
335 325 Such files will be deleted when the transaction exits (on both
336 326 failure and success).
337 327 """
338 328 self._addbackupentry((location, b'', tmpfile, False))
339 329
340 330 @active
341 331 def addfilegenerator(
342 self, genid, filenames, genfunc, order=0, location=b''
332 self,
333 genid,
334 filenames,
335 genfunc,
336 order=0,
337 location=b'',
338 post_finalize=False,
343 339 ):
344 340 """add a function to generates some files at transaction commit
345 341
346 342 The `genfunc` argument is a function capable of generating proper
347 343 content for each entry in the `filenames` tuple.
348 344
349 345 At transaction close time, `genfunc` will be called with one file
350 346 object argument per entry in `filenames`.
351 347
352 348 The transaction itself is responsible for the backup, creation and
353 349 final write of such file.
354 350
355 351 The `genid` argument is used to ensure the same set of files is only
356 352 generated once. A call to `addfilegenerator` for a `genid` already
357 353 present will overwrite the old entry.
358 354
359 355 The `order` argument may be used to control the order in which multiple
360 356 generators will be executed.
361 357
362 358 The `location` argument may be used to indicate the files are located
363 359 outside of the standard directory for transactions. It should match
364 360 one of the keys of the `transaction.vfsmap` dictionary.
361
362 The `post_finalize` argument can be set to `True` for file generation
363 that must be run after the transaction has been finalized.
365 364 """
366 365 # For now, we are unable to do proper backup and restore of custom vfs,
367 366 # except for bookmarks, which are handled outside this mechanism.
368 self._filegenerators[genid] = (order, filenames, genfunc, location)
367 entry = (order, filenames, genfunc, location, post_finalize)
368 self._filegenerators[genid] = entry
369 369
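A self-contained toy mirroring the storage layout above, where each generator travels as an (order, filenames, genfunc, location, post_finalize) tuple keyed by genid; this is an illustration, not the transaction class itself:

filegenerators = {}

def addfilegenerator(genid, filenames, genfunc, order=0, location=b'',
                     post_finalize=False):
    filegenerators[genid] = (order, filenames, genfunc, location,
                             post_finalize)

# registration in the style of the dirstate call sites earlier in this
# change: the flag now travels with the entry instead of a global set
addfilegenerator(
    b'dirstate-1-main',
    (b'dirstate',),
    lambda f: f.write(b'...'),
    location=b'plain',
    post_finalize=True,
)
assert filegenerators[b'dirstate-1-main'][-1] is True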
370 370 @active
371 371 def removefilegenerator(self, genid):
372 372 """reverse of addfilegenerator, remove a file generator function"""
373 373 if genid in self._filegenerators:
374 374 del self._filegenerators[genid]
375 375
376 376 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
377 377 # write files registered for generation
378 378 any = False
379 379
380 380 if group == GEN_GROUP_ALL:
381 381 skip_post = skip_pre = False
382 382 else:
383 383 skip_pre = group == GEN_GROUP_POST_FINALIZE
384 384 skip_post = group == GEN_GROUP_PRE_FINALIZE
385 385
386 386 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
387 387 any = True
388 order, filenames, genfunc, location = entry
388 order, filenames, genfunc, location, post_finalize = entry
389 389
390 390 # for generation at closing, check if it's before or after finalize
391 is_post = id in postfinalizegenerators
392 if skip_post and is_post:
391 if skip_post and post_finalize:
393 392 continue
394 elif skip_pre and not is_post:
393 elif skip_pre and not post_finalize:
395 394 continue
396 395
397 396 vfs = self._vfsmap[location]
398 397 files = []
399 398 try:
400 399 for name in filenames:
401 400 name += suffix
402 401 if suffix:
403 402 self.registertmp(name, location=location)
404 403 checkambig = False
405 404 else:
406 405 self.addbackup(name, location=location)
407 406 checkambig = (name, location) in self._checkambigfiles
408 407 files.append(
409 408 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
410 409 )
411 410 genfunc(*files)
412 411 for f in files:
413 412 f.close()
414 413 # skip discard() loop since we're sure no open file remains
415 414 del files[:]
416 415 finally:
417 416 for f in files:
418 417 f.discard()
419 418 return any
420 419
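The two-pass dispatch above can be checked with a small runnable sketch: entries carrying post_finalize=True are skipped in the pre-finalize pass and run in the post-finalize pass:

GEN_GROUP_PRE = b'prefinalize'
GEN_GROUP_POST = b'postfinalize'

entries = {
    b'caches': (0, (b'cache',), None, b'', False),
    b'dirstate-1-main': (0, (b'dirstate',), None, b'', True),
}

def generated_ids(group):
    skip_pre = group == GEN_GROUP_POST
    skip_post = group == GEN_GROUP_PRE
    ran = []
    for genid, entry in sorted(entries.items()):
        post_finalize = entry[4]
        if skip_post and post_finalize:
            continue
        if skip_pre and not post_finalize:
            continue
        ran.append(genid)
    return ran

assert generated_ids(GEN_GROUP_PRE) == [b'caches']
assert generated_ids(GEN_GROUP_POST) == [b'dirstate-1-main']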
421 420 @active
422 421 def findoffset(self, file):
423 422 if file in self._newfiles:
424 423 return 0
425 424 return self._offsetmap.get(file)
426 425
427 426 @active
428 427 def readjournal(self):
429 428 self._file.seek(0)
430 429 entries = []
431 430 for l in self._file.readlines():
432 431 file, troffset = l.split(b'\0')
433 432 entries.append((file, int(troffset)))
434 433 return entries
435 434
436 435 @active
437 436 def replace(self, file, offset):
438 437 """
439 438 replace can only replace already committed entries
440 439 that are not pending in the queue
441 440 """
442 441 if file in self._newfiles:
443 442 if not offset:
444 443 return
445 444 self._newfiles.remove(file)
446 445 self._offsetmap[file] = offset
447 446 elif file in self._offsetmap:
448 447 if not offset:
449 448 del self._offsetmap[file]
450 449 self._newfiles.add(file)
451 450 else:
452 451 self._offsetmap[file] = offset
453 452 else:
454 453 raise KeyError(file)
455 454 self._file.write(b"%s\0%d\n" % (file, offset))
456 455 self._file.flush()
457 456
458 457 @active
459 458 def nest(self, name='<unnamed>'):
460 459 self._count += 1
461 460 self._usages += 1
462 461 self._names.append(name)
463 462 return self
464 463
465 464 def release(self):
466 465 if self._count > 0:
467 466 self._usages -= 1
468 467 if self._names:
469 468 self._names.pop()
470 469 # if the transaction scopes are left without being closed, fail
471 470 if self._count > 0 and self._usages == 0:
472 471 self._abort()
473 472
474 473 def running(self):
475 474 return self._count > 0
476 475
477 476 def addpending(self, category, callback):
478 477 """add a callback to be called when the transaction is pending
479 478
480 479 The transaction will be given as callback's first argument.
481 480
482 481 Category is a unique identifier to allow overwriting an old callback
483 482 with a newer callback.
484 483 """
485 484 self._pendingcallback[category] = callback
486 485
487 486 @active
488 487 def writepending(self):
489 488 """write pending file to temporary version
490 489
491 490 This is used to allow hooks to view a transaction before commit"""
492 491 categories = sorted(self._pendingcallback)
493 492 for cat in categories:
494 493 # remove callback since the data will have been flushed
495 494 any = self._pendingcallback.pop(cat)(self)
496 495 self._anypending = self._anypending or any
497 496 self._anypending |= self._generatefiles(suffix=b'.pending')
498 497 return self._anypending
499 498
500 499 @active
501 500 def hasfinalize(self, category):
502 501 """check is a callback already exist for a category"""
503 502 return category in self._finalizecallback
504 503
505 504 @active
506 505 def addfinalize(self, category, callback):
507 506 """add a callback to be called when the transaction is closed
508 507
509 508 The transaction will be given as callback's first argument.
510 509
511 510 Category is a unique identifier to allow overwriting old callbacks with
512 511 newer callbacks.
513 512 """
514 513 self._finalizecallback[category] = callback
515 514
516 515 @active
517 516 def addpostclose(self, category, callback):
518 517 """add or replace a callback to be called after the transaction closed
519 518
520 519 The transaction will be given as callback's first argument.
521 520
522 521 Category is a unique identifier to allow overwriting an old callback
523 522 with a newer callback.
524 523 """
525 524 self._postclosecallback[category] = callback
526 525
527 526 @active
528 527 def getpostclose(self, category):
529 528 """return a postclose callback added before, or None"""
530 529 return self._postclosecallback.get(category, None)
531 530
532 531 @active
533 532 def addabort(self, category, callback):
534 533 """add a callback to be called when the transaction is aborted.
535 534
536 535 The transaction will be given as the first argument to the callback.
537 536
538 537 Category is a unique identifier to allow overwriting an old callback
539 538 with a newer callback.
540 539 """
541 540 self._abortcallback[category] = callback
542 541
543 542 @active
544 543 def addvalidator(self, category, callback):
545 544 """adds a callback to be called when validating the transaction.
546 545
547 546 The transaction will be given as the first argument to the callback.
548 547
549 548 The callback should raise an exception to abort the transaction."""
550 549 self._validatecallback[category] = callback
551 550
552 551 @active
553 552 def close(self):
554 553 '''commit the transaction'''
555 554 if self._count == 1:
556 555 for category in sorted(self._validatecallback):
557 556 self._validatecallback[category](self)
558 557 self._validatecallback = None # Help prevent cycles.
559 558 self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
560 559 while self._finalizecallback:
561 560 callbacks = self._finalizecallback
562 561 self._finalizecallback = {}
563 562 categories = sorted(callbacks)
564 563 for cat in categories:
565 564 callbacks[cat](self)
566 565 # Prevent double usage and help clear cycles.
567 566 self._finalizecallback = None
568 567 self._generatefiles(group=GEN_GROUP_POST_FINALIZE)
569 568
570 569 self._count -= 1
571 570 if self._count != 0:
572 571 return
573 572 self._file.close()
574 573 self._backupsfile.close()
575 574 # cleanup temporary files
576 575 for l, f, b, c in self._backupentries:
577 576 if l not in self._vfsmap and c:
578 577 self._report(
579 578 b"couldn't remove %s: unknown cache location %s\n" % (b, l)
580 579 )
581 580 continue
582 581 vfs = self._vfsmap[l]
583 582 if not f and b and vfs.exists(b):
584 583 try:
585 584 vfs.unlink(b)
586 585 except (IOError, OSError, error.Abort) as inst:
587 586 if not c:
588 587 raise
589 588 # Abort may be raised by a read-only opener
590 589 self._report(
591 590 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
592 591 )
593 592 self._offsetmap = {}
594 593 self._newfiles = set()
595 594 self._writeundo()
596 595 if self._after:
597 596 self._after()
598 597 self._after = None # Help prevent cycles.
599 598 if self._opener.isfile(self._backupjournal):
600 599 self._opener.unlink(self._backupjournal)
601 600 if self._opener.isfile(self._journal):
602 601 self._opener.unlink(self._journal)
603 602 for l, _f, b, c in self._backupentries:
604 603 if l not in self._vfsmap and c:
605 604 self._report(
606 605 b"couldn't remove %s: unknown cache location"
607 606 b"%s\n" % (b, l)
608 607 )
609 608 continue
610 609 vfs = self._vfsmap[l]
611 610 if b and vfs.exists(b):
612 611 try:
613 612 vfs.unlink(b)
614 613 except (IOError, OSError, error.Abort) as inst:
615 614 if not c:
616 615 raise
617 616 # Abort may be raised by a read-only opener
618 617 self._report(
619 618 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
620 619 )
621 620 self._backupentries = []
622 621 self._journal = None
623 622
624 623 self._releasefn(self, True) # notify success of closing transaction
625 624 self._releasefn = None # Help prevent cycles.
626 625
627 626 # run post close action
628 627 categories = sorted(self._postclosecallback)
629 628 for cat in categories:
630 629 self._postclosecallback[cat](self)
631 630 # Prevent double usage and help clear cycles.
632 631 self._postclosecallback = None
633 632
634 633 @active
635 634 def abort(self):
636 635 """abort the transaction (generally called on error, or when the
637 636 transaction is not explicitly committed before going out of
638 637 scope)"""
639 638 self._abort()
640 639
641 640 def _writeundo(self):
642 641 """write transaction data for possible future undo call"""
643 642 if self._undoname is None:
644 643 return
645 644
646 645 undo_backup_path = b"%s.backupfiles" % self._undoname
647 646 undobackupfile = self._opener.open(undo_backup_path, b'w')
648 647 undobackupfile.write(b'%d\n' % version)
649 648 for l, f, b, c in self._backupentries:
650 649 if not f: # temporary file
651 650 continue
652 651 if not b:
653 652 u = b''
654 653 else:
655 654 if l not in self._vfsmap and c:
656 655 self._report(
657 656 b"couldn't remove %s: unknown cache location"
658 657 b"%s\n" % (b, l)
659 658 )
660 659 continue
661 660 vfs = self._vfsmap[l]
662 661 base, name = vfs.split(b)
663 662 assert name.startswith(self._journal), name
664 663 uname = name.replace(self._journal, self._undoname, 1)
665 664 u = vfs.reljoin(base, uname)
666 665 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
667 666 undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
668 667 undobackupfile.close()
669 668
670 669 def _abort(self):
671 670 entries = self.readjournal()
672 671 self._count = 0
673 672 self._usages = 0
674 673 self._file.close()
675 674 self._backupsfile.close()
676 675
677 676 try:
678 677 if not entries and not self._backupentries:
679 678 if self._backupjournal:
680 679 self._opener.unlink(self._backupjournal)
681 680 if self._journal:
682 681 self._opener.unlink(self._journal)
683 682 return
684 683
685 684 self._report(_(b"transaction abort!\n"))
686 685
687 686 try:
688 687 for cat in sorted(self._abortcallback):
689 688 self._abortcallback[cat](self)
690 689 # Prevent double usage and help clear cycles.
691 690 self._abortcallback = None
692 691 _playback(
693 692 self._journal,
694 693 self._report,
695 694 self._opener,
696 695 self._vfsmap,
697 696 entries,
698 697 self._backupentries,
699 698 False,
700 699 checkambigfiles=self._checkambigfiles,
701 700 )
702 701 self._report(_(b"rollback completed\n"))
703 702 except BaseException as exc:
704 703 self._report(_(b"rollback failed - please run hg recover\n"))
705 704 self._report(
706 705 _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
707 706 )
708 707 finally:
709 708 self._journal = None
710 709 self._releasefn(self, False) # notify failure of transaction
711 710 self._releasefn = None # Help prevent cycles.
712 711
713 712
714 713 BAD_VERSION_MSG = _(
715 714 b"journal was created by a different version of Mercurial\n"
716 715 )
717 716
718 717
719 718 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
720 719 """Rolls back the transaction contained in the given file
721 720
722 721 Reads the entries in the specified file, and the corresponding
723 722 '*.backupfiles' file, to recover from an incomplete transaction.
724 723
725 724 * `file`: a file containing a list of entries, specifying where
726 725 to truncate each file. The file should contain a list of
727 726 file\0offset pairs, delimited by newlines. The corresponding
728 727 '*.backupfiles' file should contain a list of file\0backupfile
729 728 pairs, delimited by \0.
730 729
731 730 `checkambigfiles` is a set of (path, vfs-location) tuples,
732 731 which determine whether file stat ambiguity should be avoided when
733 732 restoring the corresponding files.
734 733 """
735 734 entries = []
736 735 backupentries = []
737 736
738 737 with opener.open(file) as fp:
739 738 lines = fp.readlines()
740 739 for l in lines:
741 740 try:
742 741 f, o = l.split(b'\0')
743 742 entries.append((f, int(o)))
744 743 except ValueError:
745 744 report(
746 745 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
747 746 )
748 747
749 748 backupjournal = b"%s.backupfiles" % file
750 749 if opener.exists(backupjournal):
751 750 fp = opener.open(backupjournal)
752 751 lines = fp.readlines()
753 752 if lines:
754 753 ver = lines[0][:-1]
755 754 if ver != (b'%d' % version):
756 755 report(BAD_VERSION_MSG)
757 756 else:
758 757 for line in lines[1:]:
759 758 if line:
760 759 # Shave off the trailing newline
761 760 line = line[:-1]
762 761 l, f, b, c = line.split(b'\0')
763 762 backupentries.append((l, f, b, bool(c)))
764 763
765 764 _playback(
766 765 file,
767 766 report,
768 767 opener,
769 768 vfsmap,
770 769 entries,
771 770 backupentries,
772 771 checkambigfiles=checkambigfiles,
773 772 )
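A runnable sketch of the journal format described in the `rollback` docstring above (newline-delimited file\0offset pairs); the file names are made up for illustration:

journal = b"data/foo.i\x0032\ndata/bar.i\x000\n"
entries = []
for line in journal.splitlines():
    f, o = line.split(b'\0')
    entries.append((f, int(o)))
assert entries == [(b'data/foo.i', 32), (b'data/bar.i', 0)]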
@@ -1,32 +1,32 b''
1 1 # tiny extension to abort a transaction very late during test
2 2 #
3 3 # Copyright 2020 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from mercurial import (
11 11 error,
12 transaction,
13 12 )
14 13
15 14
16 15 def abort(fp):
17 16 raise error.Abort(b"This is a late abort")
18 17
19 18
20 19 def reposetup(ui, repo):
21
22 transaction.postfinalizegenerators.add(b'late-abort')
23
24 20 class LateAbortRepo(repo.__class__):
25 21 def transaction(self, *args, **kwargs):
26 22 tr = super(LateAbortRepo, self).transaction(*args, **kwargs)
27 23 tr.addfilegenerator(
28 b'late-abort', [b'late-abort'], abort, order=9999999
24 b'late-abort',
25 [b'late-abort'],
26 abort,
27 order=9999999,
28 post_finalize=True,
29 29 )
30 30 return tr
31 31
32 32 repo.__class__ = LateAbortRepo