branchcache: filter obsolete revisions sooner...
marmoute
r52425:a0ef462c default
@@ -1,1274 +1,1272
1 1 # branchmap.py - logic to compute, maintain and store the branchmap for a local repo
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import struct
10 10
11 11 from .node import (
12 12 bin,
13 13 hex,
14 14 nullrev,
15 15 )
16 16
17 17 from typing import (
18 18 Any,
19 19 Callable,
20 20 Dict,
21 21 Iterable,
22 22 List,
23 23 Optional,
24 24 Set,
25 25 TYPE_CHECKING,
26 26 Tuple,
27 27 Union,
28 28 cast,
29 29 )
30 30
31 31 from . import (
32 32 encoding,
33 33 error,
34 34 obsolete,
35 35 scmutil,
36 36 util,
37 37 )
38 38
39 39 from .utils import (
40 40 repoviewutil,
41 41 stringutil,
42 42 )
43 43
44 44 if TYPE_CHECKING:
45 45 from . import localrepo
46 46
47 47 assert [localrepo]
48 48
49 49 subsettable = repoviewutil.subsettable
50 50
51 51 calcsize = struct.calcsize
52 52 pack_into = struct.pack_into
53 53 unpack_from = struct.unpack_from
54 54
55 55
56 56 class BranchMapCache:
57 57 """mapping of filtered views of repo with their branchcache"""
58 58
59 59 def __init__(self):
60 60 self._per_filter = {}
61 61
62 62 def __getitem__(self, repo):
63 63 self.updatecache(repo)
64 64 bcache = self._per_filter[repo.filtername]
65 65 assert bcache._filtername == repo.filtername, (
66 66 bcache._filtername,
67 67 repo.filtername,
68 68 )
69 69 return bcache
70 70
71 71 def update_disk(self, repo):
72 72 """ensure and up-to-date cache is (or will be) written on disk
73 73
74 74 The cache for this repository view is updated if needed and written on
75 75 disk.
76 76
77 77 If a transaction is in progress, the writing is scheduled for
78 78 transaction close. See the `BranchMapCache.write_dirty` method.
79 79
80 80 This method exists independently of __getitem__ as it is sometimes
81 81 useful to signal that we have no intent to use the data in memory yet.
82 82 """
83 83 self.updatecache(repo)
84 84 bcache = self._per_filter[repo.filtername]
85 85 assert bcache._filtername == repo.filtername, (
86 86 bcache._filtername,
87 87 repo.filtername,
88 88 )
89 89 tr = repo.currenttransaction()
90 90 if getattr(tr, 'finalized', True):
91 91 bcache.sync_disk(repo)
92 92
93 93 def updatecache(self, repo):
94 94 """Update the cache for the given filtered view on a repository"""
95 95 # This can trigger updates for the caches for subsets of the filtered
96 96 # view, e.g. when there is no cache for this filtered view or the cache
97 97 # is stale.
98 98
99 99 cl = repo.changelog
100 100 filtername = repo.filtername
101 101 bcache = self._per_filter.get(filtername)
102 102 if bcache is None or not bcache.validfor(repo):
103 103 # cache object missing or cache object stale? Read from disk
104 104 bcache = branch_cache_from_file(repo)
105 105
106 106 revs = []
107 107 if bcache is None:
108 108 # no (fresh) cache available anymore, perhaps we can re-use
109 109 # the cache for a subset, then extend that to add info on missing
110 110 # revisions.
111 111 subsetname = subsettable.get(filtername)
112 112 if subsetname is not None:
113 113 subset = repo.filtered(subsetname)
114 114 self.updatecache(subset)
115 115 bcache = self._per_filter[subset.filtername].inherit_for(repo)
116 116 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
117 117 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
118 118 else:
119 119 # nothing to fall back on, start empty.
120 120 bcache = new_branch_cache(repo)
121 121
122 122 revs.extend(cl.revs(start=bcache.tiprev + 1))
123 123 if revs:
124 124 bcache.update(repo, revs)
125 125
126 126 assert bcache.validfor(repo), filtername
127 127 self._per_filter[repo.filtername] = bcache
128 128
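For illustration, here is a minimal, runnable sketch of the subset-fallback walk that updatecache performs when no valid cache exists for a filter level. The chain below is an assumption for illustration only; the real mapping lives in mercurial.utils.repoviewutil.subsettable.

    # Hypothetical subset chain (assumption; see repoviewutil.subsettable).
    subsettable = {
        b'visible': b'served',
        b'served': b'immutable',
        b'immutable': b'base',
    }

    def fallback_chain(filtername):
        """Yield the filter levels consulted when the cache misses."""
        while filtername is not None:
            yield filtername
            filtername = subsettable.get(filtername)

    print(list(fallback_chain(b'visible')))
    # [b'visible', b'served', b'immutable', b'base']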
129 129 def replace(self, repo, remotebranchmap):
130 130 """Replace the branchmap cache for a repo with a branch mapping.
131 131
132 132 This is likely only called during clone with a branch map from a
133 133 remote.
134 134
135 135 """
136 136 cl = repo.changelog
137 137 clrev = cl.rev
138 138 clbranchinfo = cl.branchinfo
139 139 rbheads = []
140 140 closed = set()
141 141 for bheads in remotebranchmap.values():
142 142 rbheads += bheads
143 143 for h in bheads:
144 144 r = clrev(h)
145 145 b, c = clbranchinfo(r)
146 146 if c:
147 147 closed.add(h)
148 148
149 149 if rbheads:
150 150 rtiprev = max((int(clrev(node)) for node in rbheads))
151 151 cache = new_branch_cache(
152 152 repo,
153 153 remotebranchmap,
154 154 repo[rtiprev].node(),
155 155 rtiprev,
156 156 closednodes=closed,
157 157 )
158 158
159 159 # Try to stick it as low as possible
160 160 # filters above served are unlikely to be fetched from a clone
161 161 for candidate in (b'base', b'immutable', b'served'):
162 162 rview = repo.filtered(candidate)
163 163 if cache.validfor(rview):
164 164 cache._filtername = candidate
165 165 self._per_filter[candidate] = cache
166 166 cache._state = STATE_DIRTY
167 167 cache.write(rview)
168 168 return
169 169
170 170 def clear(self):
171 171 self._per_filter.clear()
172 172
173 173 def write_dirty(self, repo):
174 174 unfi = repo.unfiltered()
175 175 for filtername in repoviewutil.get_ordered_subset():
176 176 cache = self._per_filter.get(filtername)
177 177 if cache is None:
178 178 continue
179 179 if filtername is None:
180 180 repo = unfi
181 181 else:
182 182 repo = unfi.filtered(filtername)
183 183 cache.sync_disk(repo)
184 184
185 185
186 186 def _unknownnode(node):
187 187 """raises ValueError when branchcache found a node which does not exists"""
188 188 raise ValueError('node %s does not exist' % node.hex())
189 189
190 190
191 191 def _branchcachedesc(repo):
192 192 if repo.filtername is not None:
193 193 return b'branch cache (%s)' % repo.filtername
194 194 else:
195 195 return b'branch cache'
196 196
197 197
198 198 class _BaseBranchCache:
199 199 """A dict like object that hold branches heads cache.
200 200
201 201 This cache is used to avoid costly computations to determine all the
202 202 branch heads of a repo.
203 203 """
204 204
205 205 def __init__(
206 206 self,
207 207 repo: "localrepo.localrepository",
208 208 entries: Union[
209 209 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
210 210 ] = (),
211 211 closed_nodes: Optional[Set[bytes]] = None,
212 212 ) -> None:
213 213 """hasnode is a function which can be used to verify whether changelog
214 214 has a given node or not. If it's not provided, we assume that every node
215 215 we have exists in changelog"""
216 216 # closednodes is a set of nodes that close their branch. If the branch
217 217 # cache has been updated, it may contain nodes that are no longer
218 218 # heads.
219 219 if closed_nodes is None:
220 220 closed_nodes = set()
221 221 self._closednodes = set(closed_nodes)
222 222 self._entries = dict(entries)
223 223
224 224 def __iter__(self):
225 225 return iter(self._entries)
226 226
227 227 def __setitem__(self, key, value):
228 228 self._entries[key] = value
229 229
230 230 def __getitem__(self, key):
231 231 return self._entries[key]
232 232
233 233 def __contains__(self, key):
234 234 return key in self._entries
235 235
236 236 def iteritems(self):
237 237 return self._entries.items()
238 238
239 239 items = iteritems
240 240
241 241 def hasbranch(self, label):
242 242 """checks whether a branch of this name exists or not"""
243 243 return label in self._entries
244 244
245 245 def _branchtip(self, heads):
246 246 """Return tuple with last open head in heads and false,
247 247 otherwise return last closed head and true."""
248 248 tip = heads[-1]
249 249 closed = True
250 250 for h in reversed(heads):
251 251 if h not in self._closednodes:
252 252 tip = h
253 253 closed = False
254 254 break
255 255 return tip, closed
256 256
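A small worked example of the selection performed by _branchtip, with fabricated nodes: heads are in topological order and the last head not marked closed wins.

    heads = [b'n1', b'n2', b'n3']      # fabricated head nodes, oldest first
    closednodes = {b'n3'}              # b'n3' closes its branch
    tip = next((h for h in reversed(heads) if h not in closednodes), heads[-1])
    print(tip)                         # b'n2'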
257 257 def branchtip(self, branch):
258 258 """Return the tipmost open head on branch head, otherwise return the
259 259 tipmost closed head on branch.
260 260 Raise KeyError for unknown branch."""
261 261 return self._branchtip(self[branch])[0]
262 262
263 263 def iteropen(self, nodes):
264 264 return (n for n in nodes if n not in self._closednodes)
265 265
266 266 def branchheads(self, branch, closed=False):
267 267 heads = self._entries[branch]
268 268 if not closed:
269 269 heads = list(self.iteropen(heads))
270 270 return heads
271 271
272 272 def iterbranches(self):
273 273 for bn, heads in self.items():
274 274 yield (bn, heads) + self._branchtip(heads)
275 275
276 276 def iterheads(self):
277 277 """returns all the heads"""
278 278 return self._entries.values()
279 279
280 280 def update(self, repo, revgen):
281 281 """Given a branchhead cache, self, that may have extra nodes or be
282 282 missing heads, and a generator of nodes that are strictly a superset of
283 283 heads missing, this function updates self to be correct.
284 284 """
285 285 starttime = util.timer()
286 286 cl = repo.changelog
287 # Faster than using ctx.obsolete()
288 obsrevs = obsolete.getrevs(repo, b'obsolete')
287 289 # collect new branch entries
288 290 newbranches = {}
289 291 getbranchinfo = repo.revbranchcache().branchinfo
290 292 max_rev = -1
291 293 for r in revgen:
294 max_rev = max(max_rev, r)
295 if r in obsrevs:
296 # We ignore obsolete changesets as they shouldn't be
297 # considered heads.
298 continue
292 299 branch, closesbranch = getbranchinfo(r)
293 300 newbranches.setdefault(branch, []).append(r)
294 301 if closesbranch:
295 302 self._closednodes.add(cl.node(r))
296 max_rev = max(max_rev, r)
297 303 if max_rev < 0:
298 304 msg = "running branchcache.update without revision to update"
299 305 raise error.ProgrammingError(msg)
300 306
301 307 # Delay fetching the topological heads until they are needed.
302 308 # A repository without non-continuous branches can skip this part.
303 309 topoheads = None
304 310
305 311 # If a changeset is visible, its parents must be visible too, so
306 312 # use the faster unfiltered parent accessor.
307 313 parentrevs = repo.unfiltered().changelog.parentrevs
308 314
309 # Faster than using ctx.obsolete()
310 obsrevs = obsolete.getrevs(repo, b'obsolete')
311
312 315 for branch, newheadrevs in newbranches.items():
313 316 # For every branch, compute the new branchheads.
314 317 # A branchhead is a revision such that no descendant is on
315 318 # the same branch.
316 319 #
317 320 # The branchheads are computed iteratively in revision order.
318 321 # This ensures topological order, i.e. parents are processed
319 322 # before their children. Ancestors are inclusive here, i.e.
320 323 # any revision is an ancestor of itself.
321 324 #
322 325 # Core observations:
323 326 # - The current revision is always a branchhead for the
324 327 # repository up to that point.
325 328 # - It is the first revision of the branch if and only if
326 329 # there was no branchhead before. In that case, it is the
327 330 # only branchhead as there are no possible ancestors on
328 331 # the same branch.
329 332 # - If a parent is on the same branch, a branchhead can
330 333 # only be an ancestor of that parent if it is the parent
331 334 # itself. Otherwise it would have been removed as an
332 335 # ancestor of that parent before.
333 336 # - Therefore, if all parents are on the same branch, they
334 337 # can just be removed from the branchhead set.
335 338 # - If one parent is on the same branch and the other is not
336 339 # and there was exactly one branchhead known, the existing
337 340 # branchhead can only be an ancestor if it is the parent.
338 341 # Otherwise it would have been removed as ancestor of
339 342 # the parent before. The other parent therefore can't have
340 343 # a branchhead as ancestor.
341 344 # - In all other cases, the parents on different branches
342 345 # could have a branchhead as ancestor. Those parents are
343 346 # kept in the "uncertain" set. If all branchheads are also
344 347 # topological heads, they can't have descendants and further
345 348 # checks can be skipped. Otherwise, the ancestors of the
346 349 # "uncertain" set are removed from branchheads.
347 350 # This computation is heavy and avoided if at all possible.
348 351 bheads = self._entries.get(branch, [])
349 352 bheadset = {cl.rev(node) for node in bheads}
350 353 uncertain = set()
351 354 for newrev in sorted(newheadrevs):
352 if newrev in obsrevs:
353 # We ignore obsolete changesets as they shouldn't be
354 # considered heads.
355 continue
356
357 355 if not bheadset:
358 356 bheadset.add(newrev)
359 357 continue
360 358
361 359 parents = [p for p in parentrevs(newrev) if p != nullrev]
362 360 samebranch = set()
363 361 otherbranch = set()
364 362 obsparents = set()
365 363 for p in parents:
366 364 if p in obsrevs:
367 365 # We ignored this obsolete changeset earlier, but now
368 366 # that it has non-ignored children, we need to make
369 367 # sure their ancestors are not considered heads. To
370 368 # achieve that, we will simply treat this obsolete
371 369 # changeset as a parent from other branch.
372 370 obsparents.add(p)
373 371 elif p in bheadset or getbranchinfo(p)[0] == branch:
374 372 samebranch.add(p)
375 373 else:
376 374 otherbranch.add(p)
377 375 if not (len(bheadset) == len(samebranch) == 1):
378 376 uncertain.update(otherbranch)
379 377 uncertain.update(obsparents)
380 378 bheadset.difference_update(samebranch)
381 379 bheadset.add(newrev)
382 380
383 381 if uncertain:
384 382 if topoheads is None:
385 383 topoheads = set(cl.headrevs())
386 384 if bheadset - topoheads:
387 385 floorrev = min(bheadset)
388 386 if floorrev <= max(uncertain):
389 387 ancestors = set(cl.ancestors(uncertain, floorrev))
390 388 bheadset -= ancestors
391 389 if bheadset:
392 390 self[branch] = [cl.node(rev) for rev in sorted(bheadset)]
393 391
394 392 duration = util.timer() - starttime
395 393 repo.ui.log(
396 394 b'branchcache',
397 395 b'updated %s in %.4f seconds\n',
398 396 _branchcachedesc(repo),
399 397 duration,
400 398 )
401 399 return max_rev
402 400
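To make the change concrete: obsolete revisions are now skipped while new revisions are collected (though still counted for max_rev), rather than later inside the per-branch head computation. A toy, self-contained rendition of that loop follows; all revision data is fabricated, and the multi-branch and obsolete-parent subtleties handled above are deliberately omitted.

    parents = {0: [], 1: [0], 2: [1], 3: [0]}    # rev -> parent revs (fabricated)
    obsrevs = {2}                                # rev 2 was rewritten

    def new_heads(old_heads, revgen):
        heads = set(old_heads)
        max_rev = -1
        for r in sorted(revgen):
            max_rev = max(max_rev, r)            # obsolete revs still move the tip
            if r in obsrevs:
                continue                         # never consider obsolete revs as heads
            heads.difference_update(parents[r])  # a rev's parents stop being heads
            heads.add(r)
        return sorted(heads), max_rev

    print(new_heads([], [0, 1, 2, 3]))           # ([1, 3], 3)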
403 401
404 402 STATE_CLEAN = 1
405 403 STATE_INHERITED = 2
406 404 STATE_DIRTY = 3
407 405
408 406
409 407 class _LocalBranchCache(_BaseBranchCache):
410 408 """base class of branch-map info for a local repo or repoview"""
411 409
412 410 _base_filename = None
413 411 _default_key_hashes: Tuple[bytes] = cast(Tuple[bytes], ())
414 412
415 413 def __init__(
416 414 self,
417 415 repo: "localrepo.localrepository",
418 416 entries: Union[
419 417 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
420 418 ] = (),
421 419 tipnode: Optional[bytes] = None,
422 420 tiprev: Optional[int] = nullrev,
423 421 key_hashes: Optional[Tuple[bytes]] = None,
424 422 closednodes: Optional[Set[bytes]] = None,
425 423 hasnode: Optional[Callable[[bytes], bool]] = None,
426 424 verify_node: bool = False,
427 425 inherited: bool = False,
428 426 ) -> None:
429 427 """hasnode is a function which can be used to verify whether changelog
430 428 has a given node or not. If it's not provided, we assume that every node
431 429 we have exists in changelog"""
432 430 self._filtername = repo.filtername
433 431 if tipnode is None:
434 432 self.tipnode = repo.nullid
435 433 else:
436 434 self.tipnode = tipnode
437 435 self.tiprev = tiprev
438 436 if key_hashes is None:
439 437 self.key_hashes = self._default_key_hashes
440 438 else:
441 439 self.key_hashes = key_hashes
442 440 self._state = STATE_CLEAN
443 441 if inherited:
444 442 self._state = STATE_INHERITED
445 443
446 444 super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
447 445 # closednodes is a set of nodes that close their branch. If the branch
448 446 # cache has been updated, it may contain nodes that are no longer
449 447 # heads.
450 448
451 449 # Do we need to verify branch at all?
452 450 self._verify_node = verify_node
453 451 # branches for which nodes are verified
454 452 self._verifiedbranches = set()
455 453 self._hasnode = None
456 454 if self._verify_node:
457 455 self._hasnode = repo.changelog.hasnode
458 456
459 457 def _compute_key_hashes(self, repo) -> Tuple[bytes]:
460 458 raise NotImplementedError
461 459
462 460 def validfor(self, repo):
463 461 """check that cache contents are valid for (a subset of) this repo
464 462
465 463 - False when the order of changesets changed or if we detect a strip.
466 464 - True when cache is up-to-date for the current repo or its subset."""
467 465 try:
468 466 node = repo.changelog.node(self.tiprev)
469 467 except IndexError:
470 468 # changesets were stripped and now we don't even have enough to
471 469 # find tiprev
472 470 return False
473 471 if self.tipnode != node:
474 472 # tiprev doesn't correspond to tipnode: repo was stripped, or this
475 473 # repo has a different order of changesets
476 474 return False
477 475 repo_key_hashes = self._compute_key_hashes(repo)
478 476 # hashes don't match if this repo view has a different set of filtered
479 477 # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
480 478 # history was rewritten)
481 479 return self.key_hashes == repo_key_hashes
482 480
483 481 @classmethod
484 482 def fromfile(cls, repo):
485 483 f = None
486 484 try:
487 485 f = repo.cachevfs(cls._filename(repo))
488 486 lineiter = iter(f)
489 487 init_kwargs = cls._load_header(repo, lineiter)
490 488 bcache = cls(
491 489 repo,
492 490 verify_node=True,
493 491 **init_kwargs,
494 492 )
495 493 if not bcache.validfor(repo):
496 494 # invalidate the cache
497 495 raise ValueError('tip differs')
498 496 bcache._load_heads(repo, lineiter)
499 497 except (IOError, OSError):
500 498 return None
501 499
502 500 except Exception as inst:
503 501 if repo.ui.debugflag:
504 502 msg = b'invalid %s: %s\n'
505 503 msg %= (
506 504 _branchcachedesc(repo),
507 505 stringutil.forcebytestr(inst),
508 506 )
509 507 repo.ui.debug(msg)
510 508 bcache = None
511 509
512 510 finally:
513 511 if f:
514 512 f.close()
515 513
516 514 return bcache
517 515
518 516 @classmethod
519 517 def _load_header(cls, repo, lineiter) -> "dict[str, Any]":
520 518 raise NotImplementedError
521 519
522 520 def _load_heads(self, repo, lineiter):
523 521 """fully loads the branchcache by reading from the file using the line
524 522 iterator passed"""
525 523 for line in lineiter:
526 524 line = line.rstrip(b'\n')
527 525 if not line:
528 526 continue
529 527 node, state, label = line.split(b" ", 2)
530 528 if state not in b'oc':
531 529 raise ValueError('invalid branch state')
532 530 label = encoding.tolocal(label.strip())
533 531 node = bin(node)
534 532 self._entries.setdefault(label, []).append(node)
535 533 if state == b'c':
536 534 self._closednodes.add(node)
537 535
538 536 @classmethod
539 537 def _filename(cls, repo):
540 538 """name of a branchcache file for a given repo or repoview"""
541 539 filename = cls._base_filename
542 540 assert filename is not None
543 541 if repo.filtername:
544 542 filename = b'%s-%s' % (filename, repo.filtername)
545 543 return filename
546 544
547 545 def inherit_for(self, repo):
548 546 """return a deep copy of the branchcache object"""
549 547 assert repo.filtername != self._filtername
550 548 other = type(self)(
551 549 repo=repo,
552 550 # we always do a shallow copy of self._entries, and the values
553 551 # are always replaced, so there is no need to deepcopy as long
554 552 # as the above remains true.
555 553 entries=self._entries,
556 554 tipnode=self.tipnode,
557 555 tiprev=self.tiprev,
558 556 key_hashes=self.key_hashes,
559 557 closednodes=set(self._closednodes),
560 558 verify_node=self._verify_node,
561 559 inherited=True,
562 560 )
563 561 # also copy information about the current verification state
564 562 other._verifiedbranches = set(self._verifiedbranches)
565 563 return other
566 564
567 565 def sync_disk(self, repo):
568 566 """synchronise the on disk file with the cache state
569 567
570 568 If new values specific to this filter level need to be written, the
571 569 file will be updated; if the state of the branchcache is inherited
572 570 from a subset, any stale on-disk file will be deleted.
573 571
574 572 This method does nothing if there is nothing to do.
575 573 """
576 574 if self._state == STATE_DIRTY:
577 575 self.write(repo)
578 576 elif self._state == STATE_INHERITED:
579 577 filename = self._filename(repo)
580 578 repo.cachevfs.tryunlink(filename)
581 579
582 580 def write(self, repo):
583 581 assert self._filtername == repo.filtername, (
584 582 self._filtername,
585 583 repo.filtername,
586 584 )
587 585 assert self._state == STATE_DIRTY, self._state
588 586 # This method should not be called during an open transaction
589 587 tr = repo.currenttransaction()
590 588 if not getattr(tr, 'finalized', True):
591 589 msg = "writing branchcache in the middle of a transaction"
592 590 raise error.ProgrammingError(msg)
593 591 try:
594 592 filename = self._filename(repo)
595 593 with repo.cachevfs(filename, b"w", atomictemp=True) as f:
596 594 self._write_header(f)
597 595 nodecount = self._write_heads(repo, f)
598 596 repo.ui.log(
599 597 b'branchcache',
600 598 b'wrote %s with %d labels and %d nodes\n',
601 599 _branchcachedesc(repo),
602 600 len(self._entries),
603 601 nodecount,
604 602 )
605 603 self._state = STATE_CLEAN
606 604 except (IOError, OSError, error.Abort) as inst:
607 605 # Abort may be raised by read only opener, so log and continue
608 606 repo.ui.debug(
609 607 b"couldn't write branch cache: %s\n"
610 608 % stringutil.forcebytestr(inst)
611 609 )
612 610
613 611 def _write_header(self, fp) -> None:
614 612 raise NotImplementedError
615 613
616 614 def _write_heads(self, repo, fp) -> int:
617 615 """write list of heads to a file
618 616
619 617 Return the number of heads written."""
620 618 nodecount = 0
621 619 for label, nodes in sorted(self._entries.items()):
622 620 label = encoding.fromlocal(label)
623 621 for node in nodes:
624 622 nodecount += 1
625 623 if node in self._closednodes:
626 624 state = b'c'
627 625 else:
628 626 state = b'o'
629 627 fp.write(b"%s %s %s\n" % (hex(node), state, label))
630 628 return nodecount
631 629
632 630 def _verifybranch(self, branch):
633 631 """verify head nodes for the given branch."""
634 632 if not self._verify_node:
635 633 return
636 634 if branch not in self._entries or branch in self._verifiedbranches:
637 635 return
638 636 assert self._hasnode is not None
639 637 for n in self._entries[branch]:
640 638 if not self._hasnode(n):
641 639 _unknownnode(n)
642 640
643 641 self._verifiedbranches.add(branch)
644 642
645 643 def _verifyall(self):
646 644 """verifies nodes of all the branches"""
647 645 for b in self._entries.keys():
648 646 if b not in self._verifiedbranches:
649 647 self._verifybranch(b)
650 648
651 649 def __getitem__(self, key):
652 650 self._verifybranch(key)
653 651 return super().__getitem__(key)
654 652
655 653 def __contains__(self, key):
656 654 self._verifybranch(key)
657 655 return super().__contains__(key)
658 656
659 657 def iteritems(self):
660 658 self._verifyall()
661 659 return super().iteritems()
662 660
663 661 items = iteritems
664 662
665 663 def iterheads(self):
666 664 """returns all the heads"""
667 665 self._verifyall()
668 666 return super().iterheads()
669 667
670 668 def hasbranch(self, label):
671 669 """checks whether a branch of this name exists or not"""
672 670 self._verifybranch(label)
673 671 return super().hasbranch(label)
674 672
675 673 def branchheads(self, branch, closed=False):
676 674 self._verifybranch(branch)
677 675 return super().branchheads(branch, closed=closed)
678 676
679 677 def update(self, repo, revgen):
680 678 assert self._filtername == repo.filtername, (
681 679 self._filtername,
682 680 repo.filtername,
683 681 )
684 682 cl = repo.changelog
685 683 max_rev = super().update(repo, revgen)
686 684 # new tip revision which we found after iterating items from new
687 685 # branches
688 686 if max_rev is not None and max_rev > self.tiprev:
689 687 self.tiprev = max_rev
690 688 self.tipnode = cl.node(max_rev)
691 689 else:
692 690 # We should not be here if this is false
693 691 assert cl.node(self.tiprev) == self.tipnode
694 692
695 693 if not self.validfor(repo):
696 694 # the tiprev and tipnode should be aligned, so if the current repo
697 695 # is not seen as valid, it is because the old cache key is now
698 696 # invalid for the repo.
699 697 #
700 698 # However, we've just updated the cache and we assume it's valid,
701 699 # so let's make the cache key valid as well by recomputing it from
702 700 # the cached data
703 701 self.key_hashes = self._compute_key_hashes(repo)
704 702 self.filteredhash = scmutil.combined_filtered_and_obsolete_hash(
705 703 repo,
706 704 self.tiprev,
707 705 )
708 706
709 707 self._state = STATE_DIRTY
710 708 tr = repo.currenttransaction()
711 709 if getattr(tr, 'finalized', True):
712 710 # Avoid premature writing.
713 711 #
714 712 # (The cache warming setup by localrepo will update the file later.)
715 713 self.write(repo)
716 714
717 715
718 716 def branch_cache_from_file(repo) -> Optional[_LocalBranchCache]:
719 717 """Build a branch cache from on-disk data if possible
720 718
721 719 Return a branch cache of the right format depending on the repository.
722 720 """
723 721 if repo.ui.configbool(b"experimental", b"branch-cache-v3"):
724 722 return BranchCacheV3.fromfile(repo)
725 723 else:
726 724 return BranchCacheV2.fromfile(repo)
727 725
728 726
729 727 def new_branch_cache(repo, *args, **kwargs):
730 728 """Build a new branch cache from argument
731 729
732 730 Return a branch cache of the right format depending of the repository.
733 731 """
734 732 if repo.ui.configbool(b"experimental", b"branch-cache-v3"):
735 733 return BranchCacheV3(repo, *args, **kwargs)
736 734 else:
737 735 return BranchCacheV2(repo, *args, **kwargs)
738 736
739 737
740 738 class BranchCacheV2(_LocalBranchCache):
741 739 """a branch cache using version 2 of the format on disk
742 740
743 741 The cache is serialized on disk in the following format:
744 742
745 743 <tip hex node> <tip rev number> [optional filtered repo hex hash]
746 744 <branch head hex node> <open/closed state> <branch name>
747 745 <branch head hex node> <open/closed state> <branch name>
748 746 ...
749 747
750 748 The first line is used to check if the cache is still valid. If the
751 749 branch cache is for a filtered repo view, an optional third hash is
752 750 included that hashes the hashes of all filtered and obsolete revisions.
753 751
754 752 The open/closed state is represented by a single letter 'o' or 'c'.
755 753 This field can be used to avoid changelog reads when determining if a
756 754 branch head closes a branch or not.
757 755 """
758 756
759 757 _base_filename = b"branch2"
760 758
761 759 @classmethod
762 760 def _load_header(cls, repo, lineiter) -> "dict[str, Any]":
763 761 """parse the head of a branchmap file
764 762
765 763 return parameters to pass to a newly created class instance.
766 764 """
767 765 cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
768 766 last, lrev = cachekey[:2]
769 767 last, lrev = bin(last), int(lrev)
770 768 filteredhash = ()
771 769 if len(cachekey) > 2:
772 770 filteredhash = (bin(cachekey[2]),)
773 771 return {
774 772 "tipnode": last,
775 773 "tiprev": lrev,
776 774 "key_hashes": filteredhash,
777 775 }
778 776
779 777 def _write_header(self, fp) -> None:
780 778 """write the branch cache header to a file"""
781 779 cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
782 780 if self.key_hashes:
783 781 cachekey.append(hex(self.key_hashes[0]))
784 782 fp.write(b" ".join(cachekey) + b'\n')
785 783
786 784 def _compute_key_hashes(self, repo) -> Tuple[bytes]:
787 785 """return the cache key hashes that match this repoview state"""
788 786 filtered_hash = scmutil.combined_filtered_and_obsolete_hash(
789 787 repo,
790 788 self.tiprev,
791 789 needobsolete=True,
792 790 )
793 791 keys: Tuple[bytes] = cast(Tuple[bytes], ())
794 792 if filtered_hash is not None:
795 793 keys: Tuple[bytes] = (filtered_hash,)
796 794 return keys
797 795
798 796
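As a sanity check on the format described in the BranchCacheV2 docstring, here is a small self-contained sketch that parses a fabricated branch2 file the same way _load_header and _load_heads do (all node hashes below are made up):

    from binascii import unhexlify as bin

    sample = (
        b"1111111111111111111111111111111111111111 5 "
        b"2222222222222222222222222222222222222222\n"
        b"3333333333333333333333333333333333333333 o default\n"
    )
    lines = sample.splitlines()
    cachekey = lines[0].split(b" ", 2)
    tipnode, tiprev = bin(cachekey[0]), int(cachekey[1])
    key_hashes = (bin(cachekey[2]),) if len(cachekey) > 2 else ()
    node, state, label = lines[1].split(b" ", 2)
    print(tiprev, len(tipnode), len(key_hashes), state, label)
    # 5 20 1 b'o' b'default'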
799 797 class BranchCacheV3(_LocalBranchCache):
800 798 """a branch cache using version 3 of the format on disk
801 799
802 800 This version is still EXPERIMENTAL and the format is subject to changes.
803 801
804 802 The cache is serialized on disk in the following format:
805 803
806 804 <cache-key-xxx>=<xxx-value> <cache-key-yyy>=<yyy-value> […]
807 805 <branch head hex node> <open/closed state> <branch name>
808 806 <branch head hex node> <open/closed state> <branch name>
809 807 ...
810 808
811 809 The first line is used to check if the cache is still valid. It is a
812 810 series of key-value pairs. The following keys are recognized:
813 811
814 812 - tip-rev: the rev-num of the tip-most revision seen by this cache
815 813 - tip-node: the node-id of the tip-most revision seen by this cache
816 814 - filtered-hash: the hash of all filtered revisions (before tip-rev)
817 815 ignored by this cache.
818 816 - obsolete-hash: the hash of all non-filtered obsolete revisions (before
819 817 tip-rev) ignored by this cache.
820 818
821 819 The tip-rev is used to know how far behind the values in the file are
822 820 compared to the current repository state.
823 821
824 822 The tip-node, filtered-hash and obsolete-hash are used to detect if this
825 823 cache can be used for this repository state at all.
826 824
827 825 The open/closed state is represented by a single letter 'o' or 'c'.
828 826 This field can be used to avoid changelog reads when determining if a
829 827 branch head closes a branch or not.
830 828
831 829 Topological heads are not included in the listing and should be dispatched
832 830 on the right branch at read time. Obsolete topological heads should be
833 831 ignored.
834 832 """
835 833
836 834 _base_filename = b"branch3"
837 835 _default_key_hashes = (None, None)
838 836
839 837 def _get_topo_heads(self, repo) -> List[int]:
840 838 """returns the topological head of a repoview content up to self.tiprev"""
841 839 cl = repo.changelog
842 840 if self.tiprev == nullrev:
843 841 return []
844 842 elif self.tiprev == cl.tiprev():
845 843 return cl.headrevs()
846 844 else:
847 845 # XXX passing tiprev as ceiling of cl.headrevs could be faster
848 846 heads = cl.headrevs(cl.revs(stop=self.tiprev))
849 847 return heads
850 848
851 849 def _write_header(self, fp) -> None:
852 850 cache_keys = {
853 851 b"tip-node": hex(self.tipnode),
854 852 b"tip-rev": b'%d' % self.tiprev,
855 853 }
856 854 if self.key_hashes:
857 855 if self.key_hashes[0] is not None:
858 856 cache_keys[b"filtered-hash"] = hex(self.key_hashes[0])
859 857 if self.key_hashes[1] is not None:
860 858 cache_keys[b"obsolete-hash"] = hex(self.key_hashes[1])
861 859 pieces = (b"%s=%s" % i for i in sorted(cache_keys.items()))
862 860 fp.write(b" ".join(pieces) + b'\n')
863 861
864 862 def _write_heads(self, repo, fp) -> int:
865 863 """write list of heads to a file
866 864
867 865 Return the number of heads written."""
868 866 nodecount = 0
869 867 topo_heads = set(self._get_topo_heads(repo))
870 868 to_rev = repo.changelog.index.rev
871 869 for label, nodes in sorted(self._entries.items()):
872 870 label = encoding.fromlocal(label)
873 871 for node in nodes:
874 872 rev = to_rev(node)
875 873 if rev in topo_heads:
876 874 continue
877 875 if node in self._closednodes:
878 876 state = b'c'
879 877 else:
880 878 state = b'o'
881 879 nodecount += 1
882 880 fp.write(b"%s %s %s\n" % (hex(node), state, label))
883 881 return nodecount
884 882
885 883 @classmethod
886 884 def _load_header(cls, repo, lineiter):
887 885 header_line = next(lineiter)
888 886 pieces = header_line.rstrip(b'\n').split(b" ")
889 887 cache_keys = dict(p.split(b'=', 1) for p in pieces)
890 888
891 889 args = {}
892 890 filtered_hash = None
893 891 obsolete_hash = None
894 892 for k, v in cache_keys.items():
895 893 if k == b"tip-rev":
896 894 args["tiprev"] = int(v)
897 895 elif k == b"tip-node":
898 896 args["tipnode"] = bin(v)
899 897 elif k == b"filtered-hash":
900 898 filtered_hash = bin(v)
901 899 elif k == b"obsolete-hash":
902 900 obsolete_hash = bin(v)
903 901 else:
904 902 msg = b"unknown cache key: %r" % k
905 903 raise ValueError(msg)
906 904 args["key_hashes"] = (filtered_hash, obsolete_hash)
907 905 return args
908 906
909 907 def _load_heads(self, repo, lineiter):
910 908 """fully loads the branchcache by reading from the file using the line
911 909 iterator passed"""
912 910 super()._load_heads(repo, lineiter)
913 911 cl = repo.changelog
914 912 getbranchinfo = repo.revbranchcache().branchinfo
915 913 obsrevs = obsolete.getrevs(repo, b'obsolete')
916 914 to_node = cl.node
917 915 touched_branch = set()
918 916 for head in self._get_topo_heads(repo):
919 917 if head in obsrevs:
920 918 continue
921 919 node = to_node(head)
922 920 branch, closed = getbranchinfo(head)
923 921 self._entries.setdefault(branch, []).append(node)
924 922 if closed:
925 923 self._closednodes.add(node)
926 924 touched_branch.add(branch)
927 925 to_rev = cl.index.rev
928 926 for branch in touched_branch:
929 927 self._entries[branch].sort(key=to_rev)
930 928
931 929 def _compute_key_hashes(self, repo) -> Tuple[bytes]:
932 930 """return the cache key hashes that match this repoview state"""
933 931 return scmutil.filtered_and_obsolete_hash(
934 932 repo,
935 933 self.tiprev,
936 934 )
937 935
938 936
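Similarly, a minimal sketch of the version-3 header: one line of space-separated key=value pairs, parsed as in BranchCacheV3._load_header (values fabricated):

    line = b"tip-node=1111111111111111111111111111111111111111 tip-rev=5\n"
    cache_keys = dict(p.split(b'=', 1) for p in line.rstrip(b'\n').split(b" "))
    print(int(cache_keys[b"tip-rev"]), cache_keys[b"tip-node"][:8])
    # 5 b'11111111'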
939 937 class remotebranchcache(_BaseBranchCache):
940 938 """Branchmap info for a remote connection, should not write locally"""
941 939
942 940 def __init__(
943 941 self,
944 942 repo: "localrepo.localrepository",
945 943 entries: Union[
946 944 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
947 945 ] = (),
948 946 closednodes: Optional[Set[bytes]] = None,
949 947 ) -> None:
950 948 super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
951 949
952 950
953 951 # Revision branch info cache
954 952
955 953 _rbcversion = b'-v1'
956 954 _rbcnames = b'rbc-names' + _rbcversion
957 955 _rbcrevs = b'rbc-revs' + _rbcversion
958 956 # [4 byte hash prefix][4 byte branch name number with sign bit indicating close]
959 957 _rbcrecfmt = b'>4sI'
960 958 _rbcrecsize = calcsize(_rbcrecfmt)
961 959 _rbcmininc = 64 * _rbcrecsize
962 960 _rbcnodelen = 4
963 961 _rbcbranchidxmask = 0x7FFFFFFF
964 962 _rbccloseflag = 0x80000000
965 963
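The record layout can be illustrated with a short, self-contained snippet (fabricated values): each rbc-revs record packs a 4-byte node-hash prefix with a big-endian 32-bit integer whose top bit marks a branch-closing commit and whose low 31 bits index into rbc-names.

    import struct

    _rbcrecfmt = b'>4sI'
    _rbccloseflag = 0x80000000
    _rbcbranchidxmask = 0x7FFFFFFF

    record = struct.pack(_rbcrecfmt, b'\xde\xad\xbe\xef', 3 | _rbccloseflag)
    node_prefix, branchidx = struct.unpack(_rbcrecfmt, record)
    closed = bool(branchidx & _rbccloseflag)
    print(node_prefix, branchidx & _rbcbranchidxmask, closed)
    # b'\xde\xad\xbe\xef' 3 True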
966 964
967 965 class rbcrevs:
968 966 """a byte string consisting of an immutable prefix followed by a mutable suffix"""
969 967
970 968 def __init__(self, revs):
971 969 self._prefix = revs
972 970 self._rest = bytearray()
973 971
974 972 def __len__(self):
975 973 return len(self._prefix) + len(self._rest)
976 974
977 975 def unpack_record(self, rbcrevidx):
978 976 if rbcrevidx < len(self._prefix):
979 977 return unpack_from(_rbcrecfmt, util.buffer(self._prefix), rbcrevidx)
980 978 else:
981 979 return unpack_from(
982 980 _rbcrecfmt,
983 981 util.buffer(self._rest),
984 982 rbcrevidx - len(self._prefix),
985 983 )
986 984
987 985 def make_mutable(self):
988 986 if len(self._prefix) > 0:
989 987 entirety = bytearray()
990 988 entirety[:] = self._prefix
991 989 entirety.extend(self._rest)
992 990 self._rest = entirety
993 991 self._prefix = bytearray()
994 992
995 993 def truncate(self, pos):
996 994 self.make_mutable()
997 995 del self._rest[pos:]
998 996
999 997 def pack_into(self, rbcrevidx, node, branchidx):
1000 998 if rbcrevidx < len(self._prefix):
1001 999 self.make_mutable()
1002 1000 buf = self._rest
1003 1001 start_offset = rbcrevidx - len(self._prefix)
1004 1002 end_offset = start_offset + _rbcrecsize
1005 1003
1006 1004 if len(self._rest) < end_offset:
1007 1005 # bytearray doesn't allocate extra space at least in Python 3.7.
1008 1006 # When multiple changesets are added in a row, precise resize would
1009 1007 # result in quadratic complexity. Overallocate to compensate by
1010 1008 # using the classic doubling technique for dynamic arrays instead.
1011 1009 # If there was a gap in the map before, less space will be reserved.
1012 1010 self._rest.extend(b'\0' * end_offset)
1013 1011 return pack_into(
1014 1012 _rbcrecfmt,
1015 1013 buf,
1016 1014 start_offset,
1017 1015 node,
1018 1016 branchidx,
1019 1017 )
1020 1018
1021 1019 def extend(self, extension):
1022 1020 return self._rest.extend(extension)
1023 1021
1024 1022 def slice(self, begin, end):
1025 1023 if begin < len(self._prefix):
1026 1024 acc = bytearray()
1027 1025 acc[:] = self._prefix[begin:end]
1028 1026 acc.extend(
1029 1027 self._rest[begin - len(self._prefix) : end - len(self._prefix)]
1030 1028 )
1031 1029 return acc
1032 1030 return self._rest[begin - len(self._prefix) : end - len(self._prefix)]
1033 1031
1034 1032
1035 1033 class revbranchcache:
1036 1034 """Persistent cache, mapping from revision number to branch name and close.
1037 1035 This is a low level cache, independent of filtering.
1038 1036
1039 1037 Branch names are stored in rbc-names in internal encoding separated by 0.
1040 1038 rbc-names is append-only, and each branch name is only stored once and will
1041 1039 thus have a unique index.
1042 1040
1043 1041 The branch info for each revision is stored in rbc-revs as constant size
1044 1042 records. The whole file is read into memory, but it is only 'parsed' on
1045 1043 demand. The file is usually append-only but will be truncated if repo
1046 1044 modification is detected.
1047 1045 The record for each revision contains the first 4 bytes of the
1048 1046 corresponding node hash, and the record is only used if it still matches.
1049 1047 Even a completely trashed rbc-revs file will thus still give the right result
1050 1048 while converging towards full recovery ... assuming no incorrectly matching
1051 1049 node hashes.
1052 1050 The record also contains 4 bytes where 31 bits contain the index of the
1053 1051 branch and the last bit indicates that it is a branch-closing commit.
1054 1052 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
1055 1053 and will grow with it but be 1/8th of its size.
1056 1054 """
1057 1055
1058 1056 def __init__(self, repo, readonly=True):
1059 1057 assert repo.filtername is None
1060 1058 self._repo = repo
1061 1059 self._names = [] # branch names in local encoding with static index
1062 1060 self._rbcrevs = rbcrevs(bytearray())
1063 1061 self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
1064 1062 try:
1065 1063 bndata = repo.cachevfs.read(_rbcnames)
1066 1064 self._rbcsnameslen = len(bndata) # for verification before writing
1067 1065 if bndata:
1068 1066 self._names = [
1069 1067 encoding.tolocal(bn) for bn in bndata.split(b'\0')
1070 1068 ]
1071 1069 except (IOError, OSError):
1072 1070 if readonly:
1073 1071 # don't try to use cache - fall back to the slow path
1074 1072 self.branchinfo = self._branchinfo
1075 1073
1076 1074 if self._names:
1077 1075 try:
1078 1076 if repo.ui.configbool(b'storage', b'revbranchcache.mmap'):
1079 1077 with repo.cachevfs(_rbcrevs) as fp:
1080 1078 data = util.buffer(util.mmapread(fp))
1081 1079 else:
1082 1080 data = repo.cachevfs.read(_rbcrevs)
1083 1081 self._rbcrevs = rbcrevs(data)
1084 1082 except (IOError, OSError) as inst:
1085 1083 repo.ui.debug(
1086 1084 b"couldn't read revision branch cache: %s\n"
1087 1085 % stringutil.forcebytestr(inst)
1088 1086 )
1089 1087 # remember number of good records on disk
1090 1088 self._rbcrevslen = min(
1091 1089 len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
1092 1090 )
1093 1091 if self._rbcrevslen == 0:
1094 1092 self._names = []
1095 1093 self._rbcnamescount = len(self._names) # number of names read at
1096 1094 # _rbcsnameslen
1097 1095
1098 1096 def _clear(self):
1099 1097 self._rbcsnameslen = 0
1100 1098 del self._names[:]
1101 1099 self._rbcnamescount = 0
1102 1100 self._rbcrevslen = len(self._repo.changelog)
1103 1101 self._rbcrevs = rbcrevs(bytearray(self._rbcrevslen * _rbcrecsize))
1104 1102 util.clearcachedproperty(self, b'_namesreverse')
1105 1103
1106 1104 @util.propertycache
1107 1105 def _namesreverse(self):
1108 1106 return {b: r for r, b in enumerate(self._names)}
1109 1107
1110 1108 def branchinfo(self, rev):
1111 1109 """Return branch name and close flag for rev, using and updating
1112 1110 persistent cache."""
1113 1111 changelog = self._repo.changelog
1114 1112 rbcrevidx = rev * _rbcrecsize
1115 1113
1116 1114 # avoid negative index, changelog.read(nullrev) is fast without cache
1117 1115 if rev == nullrev:
1118 1116 return changelog.branchinfo(rev)
1119 1117
1120 1118 # if requested rev isn't allocated, grow and cache the rev info
1121 1119 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
1122 1120 return self._branchinfo(rev)
1123 1121
1124 1122 # fast path: extract data from cache, use it if node is matching
1125 1123 reponode = changelog.node(rev)[:_rbcnodelen]
1126 1124 cachenode, branchidx = self._rbcrevs.unpack_record(rbcrevidx)
1127 1125 close = bool(branchidx & _rbccloseflag)
1128 1126 if close:
1129 1127 branchidx &= _rbcbranchidxmask
1130 1128 if cachenode == b'\0\0\0\0':
1131 1129 pass
1132 1130 elif cachenode == reponode:
1133 1131 try:
1134 1132 return self._names[branchidx], close
1135 1133 except IndexError:
1136 1134 # recover from invalid reference to unknown branch
1137 1135 self._repo.ui.debug(
1138 1136 b"referenced branch names not found"
1139 1137 b" - rebuilding revision branch cache from scratch\n"
1140 1138 )
1141 1139 self._clear()
1142 1140 else:
1143 1141 # rev/node map has changed, invalidate the cache from here up
1144 1142 self._repo.ui.debug(
1145 1143 b"history modification detected - truncating "
1146 1144 b"revision branch cache to revision %d\n" % rev
1147 1145 )
1148 1146 truncate = rbcrevidx + _rbcrecsize
1149 1147 self._rbcrevs.truncate(truncate)
1150 1148 self._rbcrevslen = min(self._rbcrevslen, truncate)
1151 1149
1152 1150 # fall back to slow path and make sure it will be written to disk
1153 1151 return self._branchinfo(rev)
1154 1152
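A runnable sketch of the fast path above, under fabricated data: the cached record is trusted only when its stored 4-byte prefix matches the current node for that revision; otherwise the cache is treated as stale from that point on.

    import struct

    _rbcrecfmt, _rbcrecsize = b'>4sI', struct.calcsize(b'>4sI')
    data = struct.pack(_rbcrecfmt, b'\x12\x34\x56\x78', 0)  # fabricated cache
    rev, reponode = 0, b'\x12\x34\x56\x78' + b'\x00' * 16   # fabricated node

    cachenode, branchidx = struct.unpack_from(_rbcrecfmt, data, rev * _rbcrecsize)
    if cachenode == reponode[:4]:
        print("cache hit for rev", rev, "branch index", branchidx & 0x7FFFFFFF)
    else:
        print("mismatch: truncate cache from rev", rev)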
1155 1153 def _branchinfo(self, rev):
1156 1154 """Retrieve branch info from changelog and update _rbcrevs"""
1157 1155 changelog = self._repo.changelog
1158 1156 b, close = changelog.branchinfo(rev)
1159 1157 if b in self._namesreverse:
1160 1158 branchidx = self._namesreverse[b]
1161 1159 else:
1162 1160 branchidx = len(self._names)
1163 1161 self._names.append(b)
1164 1162 self._namesreverse[b] = branchidx
1165 1163 reponode = changelog.node(rev)
1166 1164 if close:
1167 1165 branchidx |= _rbccloseflag
1168 1166 self._setcachedata(rev, reponode, branchidx)
1169 1167 return b, close
1170 1168
1171 1169 def setdata(self, rev, changelogrevision):
1172 1170 """add new data information to the cache"""
1173 1171 branch, close = changelogrevision.branchinfo
1174 1172
1175 1173 if branch in self._namesreverse:
1176 1174 branchidx = self._namesreverse[branch]
1177 1175 else:
1178 1176 branchidx = len(self._names)
1179 1177 self._names.append(branch)
1180 1178 self._namesreverse[branch] = branchidx
1181 1179 if close:
1182 1180 branchidx |= _rbccloseflag
1183 1181 self._setcachedata(rev, self._repo.changelog.node(rev), branchidx)
1184 1182 # If no cache data were readable (file doesn't exist, bad permissions, etc.)
1185 1183 # the cache was bypassing itself by setting:
1186 1184 #
1187 1185 # self.branchinfo = self._branchinfo
1188 1186 #
1189 1187 # Since we now have data in the cache, we need to drop this bypassing.
1190 1188 if 'branchinfo' in vars(self):
1191 1189 del self.branchinfo
1192 1190
1193 1191 def _setcachedata(self, rev, node, branchidx):
1194 1192 """Writes the node's branch data to the in-memory cache data."""
1195 1193 if rev == nullrev:
1196 1194 return
1197 1195 rbcrevidx = rev * _rbcrecsize
1198 1196 self._rbcrevs.pack_into(rbcrevidx, node, branchidx)
1199 1197 self._rbcrevslen = min(self._rbcrevslen, rev)
1200 1198
1201 1199 tr = self._repo.currenttransaction()
1202 1200 if tr:
1203 1201 tr.addfinalize(b'write-revbranchcache', self.write)
1204 1202
1205 1203 def write(self, tr=None):
1206 1204 """Save branch cache if it is dirty."""
1207 1205 repo = self._repo
1208 1206 wlock = None
1209 1207 step = b''
1210 1208 try:
1211 1209 # write the new names
1212 1210 if self._rbcnamescount < len(self._names):
1213 1211 wlock = repo.wlock(wait=False)
1214 1212 step = b' names'
1215 1213 self._writenames(repo)
1216 1214
1217 1215 # write the new revs
1218 1216 start = self._rbcrevslen * _rbcrecsize
1219 1217 if start != len(self._rbcrevs):
1220 1218 step = b''
1221 1219 if wlock is None:
1222 1220 wlock = repo.wlock(wait=False)
1223 1221 self._writerevs(repo, start)
1224 1222
1225 1223 except (IOError, OSError, error.Abort, error.LockError) as inst:
1226 1224 repo.ui.debug(
1227 1225 b"couldn't write revision branch cache%s: %s\n"
1228 1226 % (step, stringutil.forcebytestr(inst))
1229 1227 )
1230 1228 finally:
1231 1229 if wlock is not None:
1232 1230 wlock.release()
1233 1231
1234 1232 def _writenames(self, repo):
1235 1233 """write the new branch names to revbranchcache"""
1236 1234 if self._rbcnamescount != 0:
1237 1235 f = repo.cachevfs.open(_rbcnames, b'ab')
1238 1236 if f.tell() == self._rbcsnameslen:
1239 1237 f.write(b'\0')
1240 1238 else:
1241 1239 f.close()
1242 1240 repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
1243 1241 self._rbcnamescount = 0
1244 1242 self._rbcrevslen = 0
1245 1243 if self._rbcnamescount == 0:
1246 1244 # before rewriting names, make sure references are removed
1247 1245 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
1248 1246 f = repo.cachevfs.open(_rbcnames, b'wb')
1249 1247 f.write(
1250 1248 b'\0'.join(
1251 1249 encoding.fromlocal(b)
1252 1250 for b in self._names[self._rbcnamescount :]
1253 1251 )
1254 1252 )
1255 1253 self._rbcsnameslen = f.tell()
1256 1254 f.close()
1257 1255 self._rbcnamescount = len(self._names)
1258 1256
1259 1257 def _writerevs(self, repo, start):
1260 1258 """write the new revs to revbranchcache"""
1261 1259 revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
1262 1260 with repo.cachevfs.open(_rbcrevs, b'ab') as f:
1263 1261 if f.tell() != start:
1264 1262 repo.ui.debug(
1265 1263 b"truncating cache/%s to %d\n" % (_rbcrevs, start)
1266 1264 )
1267 1265 f.seek(start)
1268 1266 if f.tell() != start:
1269 1267 start = 0
1270 1268 f.seek(start)
1271 1269 f.truncate()
1272 1270 end = revs * _rbcrecsize
1273 1271 f.write(self._rbcrevs.slice(start, end))
1274 1272 self._rbcrevslen = revs