##// END OF EJS Templates
# branchcache: gather newly closed head in a dedicated set...
# marmoute -
# r52427:767b62cb default
# parent child Browse files
# Show More
# @@ -1,1274 +1,1277
1 1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import struct
10 10
11 11 from .node import (
12 12 bin,
13 13 hex,
14 14 nullrev,
15 15 )
16 16
17 17 from typing import (
18 18 Any,
19 19 Callable,
20 20 Dict,
21 21 Iterable,
22 22 List,
23 23 Optional,
24 24 Set,
25 25 TYPE_CHECKING,
26 26 Tuple,
27 27 Union,
28 28 cast,
29 29 )
30 30
31 31 from . import (
32 32 encoding,
33 33 error,
34 34 obsolete,
35 35 scmutil,
36 36 util,
37 37 )
38 38
39 39 from .utils import (
40 40 repoviewutil,
41 41 stringutil,
42 42 )
43 43
44 44 if TYPE_CHECKING:
45 45 from . import localrepo
46 46
47 47 assert [localrepo]
48 48
# Mapping of a filter level to the filter level it should fall back to when
# no cache exists for it (e.g. b'visible' -> b'served').
subsettable = repoviewutil.subsettable

# Local aliases for the struct helpers used by the rev-branch-cache code
# further down in this module.
calcsize = struct.calcsize
pack_into = struct.pack_into
unpack_from = struct.unpack_from
54 54
55 55
class BranchMapCache:
    """mapping of filtered views of repo with their branchcache

    One branchcache object is kept per filter level (``repo.filtername``),
    stored in ``self._per_filter``.
    """

    def __init__(self):
        # filtername (bytes or None) -> branchcache object
        self._per_filter = {}

    def __getitem__(self, repo):
        """return an up-to-date branchcache for the given repo view"""
        self.updatecache(repo)
        bcache = self._per_filter[repo.filtername]
        assert bcache._filtername == repo.filtername, (
            bcache._filtername,
            repo.filtername,
        )
        return bcache

    def update_disk(self, repo):
        """ensure an up-to-date cache is (or will be) written on disk

        The cache for this repository view is updated if needed and written on
        disk.

        If a transaction is in progress, the writing is scheduled at
        transaction close. See the `BranchMapCache.write_dirty` method.

        This method exists independently of __getitem__ as it is sometimes
        useful to signal that we have no intent to use the data in memory yet.
        """
        self.updatecache(repo)
        bcache = self._per_filter[repo.filtername]
        assert bcache._filtername == repo.filtername, (
            bcache._filtername,
            repo.filtername,
        )
        tr = repo.currenttransaction()
        # only write immediately when no transaction is open (a finalized or
        # absent transaction means nothing is pending)
        if getattr(tr, 'finalized', True):
            bcache.sync_disk(repo)

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # This can trigger updates for the caches for subsets of the filtered
        # view, e.g. when there is no cache for this filtered view or the cache
        # is stale.

        cl = repo.changelog
        filtername = repo.filtername
        bcache = self._per_filter.get(filtername)
        if bcache is None or not bcache.validfor(repo):
            # cache object missing or cache object stale? Read from disk
            bcache = branch_cache_from_file(repo)

        revs = []
        if bcache is None:
            # no (fresh) cache available anymore, perhaps we can re-use
            # the cache for a subset, then extend that to add info on missing
            # revisions.
            subsetname = subsettable.get(filtername)
            if subsetname is not None:
                # recurse on the (smaller) subset view first, then inherit
                # its data and catch up on the revisions this view adds
                subset = repo.filtered(subsetname)
                self.updatecache(subset)
                bcache = self._per_filter[subset.filtername].inherit_for(repo)
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
            else:
                # nothing to fall back on, start empty.
                bcache = new_branch_cache(repo)

        revs.extend(cl.revs(start=bcache.tiprev + 1))
        if revs:
            bcache.update(repo, revs)

        assert bcache.validfor(repo), filtername
        self._per_filter[repo.filtername] = bcache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = set()
        for bheads in remotebranchmap.values():
            rbheads += bheads
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                if c:
                    closed.add(h)

        if rbheads:
            rtiprev = max((int(clrev(node)) for node in rbheads))
            cache = new_branch_cache(
                repo,
                remotebranchmap,
                repo[rtiprev].node(),
                rtiprev,
                closednodes=closed,
            )

            # Try to stick it as low as possible
            # filter above served are unlikely to be fetch from a clone
            for candidate in (b'base', b'immutable', b'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    cache._filtername = candidate
                    self._per_filter[candidate] = cache
                    cache._state = STATE_DIRTY
                    cache.write(rview)
                    return

    def clear(self):
        """drop all in-memory branchcache objects"""
        self._per_filter.clear()

    def write_dirty(self, repo):
        """write all dirty per-filter caches to disk

        Iterates filter levels from the widest subset to the narrowest so
        that inherited caches see their base written first.
        """
        unfi = repo.unfiltered()
        for filtername in repoviewutil.get_ordered_subset():
            cache = self._per_filter.get(filtername)
            if cache is None:
                continue
            if filtername is None:
                repo = unfi
            else:
                repo = unfi.filtered(filtername)
            cache.sync_disk(repo)
184 184
185 185
186 186 def _unknownnode(node):
187 187 """raises ValueError when branchcache found a node which does not exists"""
188 188 raise ValueError('node %s does not exist' % node.hex())
189 189
190 190
191 191 def _branchcachedesc(repo):
192 192 if repo.filtername is not None:
193 193 return b'branch cache (%s)' % repo.filtername
194 194 else:
195 195 return b'branch cache'
196 196
197 197
class _BaseBranchCache:
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    Maps branch name (bytes) -> list of head nodes, ordered by revision
    number (oldest first, tip-most last).
    """

    def __init__(
        self,
        repo: "localrepo.localrepository",
        entries: Union[
            Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
        ] = (),
        closed_nodes: Optional[Set[bytes]] = None,
    ) -> None:
        """initialize the branch->heads mapping and the closed-node set

        ``entries`` seeds the branch -> heads mapping; ``closed_nodes`` is
        the set of nodes known to close their branch (defaults to empty).
        """
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closed_nodes is None:
            closed_nodes = set()
        # defensive copies: do not alias caller-owned containers
        self._closednodes = set(closed_nodes)
        self._entries = dict(entries)

    def __iter__(self):
        return iter(self._entries)

    def __setitem__(self, key, value):
        self._entries[key] = value

    def __getitem__(self, key):
        return self._entries[key]

    def __contains__(self, key):
        return key in self._entries

    def iteritems(self):
        return self._entries.items()

    items = iteritems

    def hasbranch(self, label):
        """checks whether a branch of this name exists or not"""
        return label in self._entries

    def _branchtip(self, heads):
        """Return tuple with last open head in heads and false,
        otherwise return last closed head and true."""
        tip = heads[-1]
        closed = True
        # scan from tip-most backwards for the first open head
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        """Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch."""
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        """iterate over the given nodes, skipping branch-closing ones"""
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        """return the heads of ``branch``, excluding closed heads unless
        ``closed`` is True; raises KeyError for an unknown branch"""
        heads = self._entries[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        """yield (branchname, heads, tipnode, isclosed) tuples"""
        for bn, heads in self.items():
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """returns all the heads"""
        return self._entries.values()

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.

        Returns the highest revision number seen in ``revgen``.
        Raises ProgrammingError when ``revgen`` yields no revision.
        """
        starttime = util.timer()
        cl = repo.changelog
        # Faster than using ctx.obsolete()
        obsrevs = obsolete.getrevs(repo, b'obsolete')
        # collect new branch entries
        newbranches = {}
        # revisions that close their branch, gathered here and folded into
        # self._closednodes in one pass at the end
        new_closed = set()
        # NOTE(review): obs_ignored is populated below but never read in
        # this view — presumably kept for debugging; confirm before removing.
        obs_ignored = set()
        getbranchinfo = repo.revbranchcache().branchinfo
        max_rev = -1
        for r in revgen:
            max_rev = max(max_rev, r)
            if r in obsrevs:
                # We ignore obsolete changesets as they shouldn't be
                # considered heads.
                obs_ignored.add(r)
                continue
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                new_closed.add(r)
        if max_rev < 0:
            msg = "running branchcache.update without revision to update"
            raise error.ProgrammingError(msg)

        # Delay fetching the topological heads until they are needed.
        # A repository without non-continous branches can skip this part.
        topoheads = None

        # If a changeset is visible, its parents must be visible too, so
        # use the faster unfiltered parent accessor.
        parentrevs = repo.unfiltered().changelog.parentrevs

        for branch, newheadrevs in newbranches.items():
            # For every branch, compute the new branchheads.
            # A branchhead is a revision such that no descendant is on
            # the same branch.
            #
            # The branchheads are computed iteratively in revision order.
            # This ensures topological order, i.e. parents are processed
            # before their children. Ancestors are inclusive here, i.e.
            # any revision is an ancestor of itself.
            #
            # Core observations:
            # - The current revision is always a branchhead for the
            #   repository up to that point.
            # - It is the first revision of the branch if and only if
            #   there was no branchhead before. In that case, it is the
            #   only branchhead as there are no possible ancestors on
            #   the same branch.
            # - If a parent is on the same branch, a branchhead can
            #   only be an ancestor of that parent, if it is parent
            #   itself. Otherwise it would have been removed as ancestor
            #   of that parent before.
            # - Therefore, if all parents are on the same branch, they
            #   can just be removed from the branchhead set.
            # - If one parent is on the same branch and the other is not
            #   and there was exactly one branchhead known, the existing
            #   branchhead can only be an ancestor if it is the parent.
            #   Otherwise it would have been removed as ancestor of
            #   the parent before. The other parent therefore can't have
            #   a branchhead as ancestor.
            # - In all other cases, the parents on different branches
            #   could have a branchhead as ancestor. Those parents are
            #   kept in the "uncertain" set. If all branchheads are also
            #   topological heads, they can't have descendants and further
            #   checks can be skipped. Otherwise, the ancestors of the
            #   "uncertain" set are removed from branchheads.
            #   This computation is heavy and avoided if at all possible.
            bheads = self._entries.get(branch, [])
            bheadset = {cl.rev(node) for node in bheads}
            uncertain = set()
            for newrev in sorted(newheadrevs):
                if not bheadset:
                    bheadset.add(newrev)
                    continue

                parents = [p for p in parentrevs(newrev) if p != nullrev]
                samebranch = set()
                otherbranch = set()
                obsparents = set()
                for p in parents:
                    if p in obsrevs:
                        # We ignored this obsolete changeset earlier, but now
                        # that it has non-ignored children, we need to make
                        # sure their ancestors are not considered heads. To
                        # achieve that, we will simply treat this obsolete
                        # changeset as a parent from other branch.
                        obsparents.add(p)
                    elif p in bheadset or getbranchinfo(p)[0] == branch:
                        samebranch.add(p)
                    else:
                        otherbranch.add(p)
                if not (len(bheadset) == len(samebranch) == 1):
                    uncertain.update(otherbranch)
                    uncertain.update(obsparents)
                bheadset.difference_update(samebranch)
                bheadset.add(newrev)

            if uncertain:
                if topoheads is None:
                    topoheads = set(cl.headrevs())
                if bheadset - topoheads:
                    floorrev = min(bheadset)
                    if floorrev <= max(uncertain):
                        ancestors = set(cl.ancestors(uncertain, floorrev))
                        bheadset -= ancestors
            if bheadset:
                self[branch] = [cl.node(rev) for rev in sorted(bheadset)]

        # record the newly closed heads gathered during the first pass
        self._closednodes.update(cl.node(rev) for rev in new_closed)

        duration = util.timer() - starttime
        repo.ui.log(
            b'branchcache',
            b'updated %s in %.4f seconds\n',
            _branchcachedesc(repo),
            duration,
        )
        return max_rev
403 406
# Lifecycle states of a local branch cache with respect to its on-disk file
# (see _LocalBranchCache.sync_disk / write):
STATE_CLEAN = 1  # in-memory content matches what is on disk; nothing to do
STATE_INHERITED = 2  # content copied from a subset view; disk file is stale
STATE_DIRTY = 3  # in-memory content changed and must be written to disk
407 410
408 411
class _LocalBranchCache(_BaseBranchCache):
    """base class of branch-map info for a local repo or repoview

    Adds on-disk (de)serialization, cache-key validation and optional node
    verification on top of _BaseBranchCache. Subclasses define the file
    format (see BranchCacheV2 / BranchCacheV3).
    """

    # subclasses must set the on-disk file base name (e.g. b"branch2")
    _base_filename = None
    _default_key_hashes: Tuple[bytes] = cast(Tuple[bytes], ())

    def __init__(
        self,
        repo: "localrepo.localrepository",
        entries: Union[
            Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
        ] = (),
        tipnode: Optional[bytes] = None,
        tiprev: Optional[int] = nullrev,
        key_hashes: Optional[Tuple[bytes]] = None,
        closednodes: Optional[Set[bytes]] = None,
        hasnode: Optional[Callable[[bytes], bool]] = None,
        verify_node: bool = False,
        inherited: bool = False,
    ) -> None:
        """hasnode is a function which can be used to verify whether changelog
        has a given node or not. If it's not provided, we assume that every node
        we have exists in changelog"""
        self._filtername = repo.filtername
        if tipnode is None:
            self.tipnode = repo.nullid
        else:
            self.tipnode = tipnode
        self.tiprev = tiprev
        if key_hashes is None:
            self.key_hashes = self._default_key_hashes
        else:
            self.key_hashes = key_hashes
        self._state = STATE_CLEAN
        if inherited:
            self._state = STATE_INHERITED

        super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.

        # Do we need to verify branch at all ?
        self._verify_node = verify_node
        # branches for which nodes are verified
        self._verifiedbranches = set()
        self._hasnode = None
        if self._verify_node:
            self._hasnode = repo.changelog.hasnode

    def _compute_key_hashes(self, repo) -> Tuple[bytes]:
        """return the cache-key hash tuple for this repo view; subclasses
        must implement this"""
        raise NotImplementedError

    def validfor(self, repo):
        """check that cache contents are valid for (a subset of) this repo

        - False when the order of changesets changed or if we detect a strip.
        - True when cache is up-to-date for the current repo or its subset."""
        try:
            node = repo.changelog.node(self.tiprev)
        except IndexError:
            # changesets were stripped and now we don't even have enough to
            # find tiprev
            return False
        if self.tipnode != node:
            # tiprev doesn't correspond to tipnode: repo was stripped, or this
            # repo has a different order of changesets
            return False
        repo_key_hashes = self._compute_key_hashes(repo)
        # hashes don't match if this repo view has a different set of filtered
        # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
        # history was rewritten)
        return self.key_hashes == repo_key_hashes

    @classmethod
    def fromfile(cls, repo):
        """load a branch cache from the repo's cache vfs

        Returns None on any read/parse/validation failure (the cache is
        simply recomputed in that case)."""
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            init_kwargs = cls._load_header(repo, lineiter)
            bcache = cls(
                repo,
                verify_node=True,
                **init_kwargs,
            )
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError('tip differs')
            bcache._load_heads(repo, lineiter)
        except (IOError, OSError):
            # missing or unreadable file: no cache, silently
            return None

        except Exception as inst:
            # any parse error invalidates the cache; log when debugging
            if repo.ui.debugflag:
                msg = b'invalid %s: %s\n'
                msg %= (
                    _branchcachedesc(repo),
                    stringutil.forcebytestr(inst),
                )
                repo.ui.debug(msg)
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    @classmethod
    def _load_header(cls, repo, lineiter) -> "dict[str, Any]":
        """parse the file header; subclasses must implement this"""
        raise NotImplementedError

    def _load_heads(self, repo, lineiter):
        """fully loads the branchcache by reading from the file using the line
        iterator passed"""
        for line in lineiter:
            line = line.rstrip(b'\n')
            if not line:
                continue
            # each line is: <hex node> <'o' or 'c'> <branch label>
            node, state, label = line.split(b" ", 2)
            if state not in b'oc':
                raise ValueError('invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            self._entries.setdefault(label, []).append(node)
            if state == b'c':
                self._closednodes.add(node)

    @classmethod
    def _filename(cls, repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = cls._base_filename
        assert filename is not None
        if repo.filtername:
            filename = b'%s-%s' % (filename, repo.filtername)
        return filename

    def inherit_for(self, repo):
        """return a deep copy of the branchcache object"""
        assert repo.filtername != self._filtername
        other = type(self)(
            repo=repo,
            # we always do a shallow copy of self._entries, and the values is
            # always replaced, so no need to deepcopy until the above remains
            # true.
            entries=self._entries,
            tipnode=self.tipnode,
            tiprev=self.tiprev,
            key_hashes=self.key_hashes,
            closednodes=set(self._closednodes),
            verify_node=self._verify_node,
            inherited=True,
        )
        # also copy information about the current verification state
        other._verifiedbranches = set(self._verifiedbranches)
        return other

    def sync_disk(self, repo):
        """synchronise the on disk file with the cache state

        If new value specific to this filter level need to be written, the file
        will be updated, if the state of the branchcache is inherited from a
        subset, any stale on-disk file will be deleted.

        That method does nothing if there is nothing to do.
        """
        if self._state == STATE_DIRTY:
            self.write(repo)
        elif self._state == STATE_INHERITED:
            filename = self._filename(repo)
            repo.cachevfs.tryunlink(filename)

    def write(self, repo):
        """write this (dirty) cache to disk and mark it clean

        Must not be called while a transaction is open."""
        assert self._filtername == repo.filtername, (
            self._filtername,
            repo.filtername,
        )
        assert self._state == STATE_DIRTY, self._state
        # This method should not be called during an open transaction
        tr = repo.currenttransaction()
        if not getattr(tr, 'finalized', True):
            msg = "writing branchcache in the middle of a transaction"
            raise error.ProgrammingError(msg)
        try:
            filename = self._filename(repo)
            with repo.cachevfs(filename, b"w", atomictemp=True) as f:
                self._write_header(f)
                nodecount = self._write_heads(repo, f)
            repo.ui.log(
                b'branchcache',
                b'wrote %s with %d labels and %d nodes\n',
                _branchcachedesc(repo),
                len(self._entries),
                nodecount,
            )
            self._state = STATE_CLEAN
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug(
                b"couldn't write branch cache: %s\n"
                % stringutil.forcebytestr(inst)
            )

    def _write_header(self, fp) -> None:
        """write the format-specific header; subclasses must implement this"""
        raise NotImplementedError

    def _write_heads(self, repo, fp) -> int:
        """write list of heads to a file

        Return the number of heads written."""
        nodecount = 0
        for label, nodes in sorted(self._entries.items()):
            label = encoding.fromlocal(label)
            for node in nodes:
                nodecount += 1
                if node in self._closednodes:
                    state = b'c'
                else:
                    state = b'o'
                fp.write(b"%s %s %s\n" % (hex(node), state, label))
        return nodecount

    def _verifybranch(self, branch):
        """verify head nodes for the given branch."""
        if not self._verify_node:
            return
        if branch not in self._entries or branch in self._verifiedbranches:
            return
        assert self._hasnode is not None
        for n in self._entries[branch]:
            if not self._hasnode(n):
                _unknownnode(n)

        self._verifiedbranches.add(branch)

    def _verifyall(self):
        """verifies nodes of all the branches"""
        for b in self._entries.keys():
            if b not in self._verifiedbranches:
                self._verifybranch(b)

    def __getitem__(self, key):
        self._verifybranch(key)
        return super().__getitem__(key)

    def __contains__(self, key):
        self._verifybranch(key)
        return super().__contains__(key)

    def iteritems(self):
        self._verifyall()
        return super().iteritems()

    items = iteritems

    def iterheads(self):
        """returns all the heads"""
        self._verifyall()
        return super().iterheads()

    def hasbranch(self, label):
        """checks whether a branch of this name exists or not"""
        self._verifybranch(label)
        return super().hasbranch(label)

    def branchheads(self, branch, closed=False):
        self._verifybranch(branch)
        return super().branchheads(branch, closed=closed)

    def update(self, repo, revgen):
        """update the cache from ``revgen`` and refresh tip/cache-key state"""
        assert self._filtername == repo.filtername, (
            self._filtername,
            repo.filtername,
        )
        cl = repo.changelog
        max_rev = super().update(repo, revgen)
        # new tip revision which we found after iterating items from new
        # branches
        if max_rev is not None and max_rev > self.tiprev:
            self.tiprev = max_rev
            self.tipnode = cl.node(max_rev)
        else:
            # We should not be here if this is false
            assert cl.node(self.tiprev) == self.tipnode

        if not self.validfor(repo):
            # the tiprev and tipnode should be aligned, so if the current repo
            # is not seen as valid this is because old cache key is now
            # invalid for the repo.
            #
            # However. we've just updated the cache and we assume it's valid,
            # so let's make the cache key valid as well by recomputing it from
            # the cached data
            self.key_hashes = self._compute_key_hashes(repo)
            # NOTE(review): `self.filteredhash` is assigned here but no other
            # code in this module reads it — key validation uses `key_hashes`.
            # Looks vestigial; confirm against callers before removing.
            self.filteredhash = scmutil.combined_filtered_and_obsolete_hash(
                repo,
                self.tiprev,
            )

        self._state = STATE_DIRTY
        tr = repo.currenttransaction()
        if getattr(tr, 'finalized', True):
            # No transaction in progress: safe to write now. Writing during
            # a transaction would be premature.
            #
            # (The cache warming setup by localrepo will update the file later.)
            self.write(repo)
717 720
def branch_cache_from_file(repo) -> Optional[_LocalBranchCache]:
    """Build a branch cache from on-disk data if possible

    Return a branch cache of the right format depending of the repository.
    """
    use_v3 = repo.ui.configbool(b"experimental", b"branch-cache-v3")
    cache_cls = BranchCacheV3 if use_v3 else BranchCacheV2
    return cache_cls.fromfile(repo)
727 730
728 731
def new_branch_cache(repo, *args, **kwargs):
    """Build a new branch cache from argument

    Return a branch cache of the right format depending of the repository.
    """
    use_v3 = repo.ui.configbool(b"experimental", b"branch-cache-v3")
    cache_cls = BranchCacheV3 if use_v3 else BranchCacheV2
    return cache_cls(repo, *args, **kwargs)
738 741
739 742
class BranchCacheV2(_LocalBranchCache):
    """a branch cache using version 2 of the format on disk

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered and obsolete revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    _base_filename = b"branch2"

    @classmethod
    def _load_header(cls, repo, lineiter) -> "dict[str, Any]":
        """parse the head of a branchmap file

        return parameters to pass to a newly created class instance.
        """
        # first line: "<hex tipnode> <tiprev> [<hex filteredhash>]"
        cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
        last, lrev = cachekey[:2]
        last, lrev = bin(last), int(lrev)
        filteredhash = ()
        if len(cachekey) > 2:
            filteredhash = (bin(cachekey[2]),)
        return {
            "tipnode": last,
            "tiprev": lrev,
            "key_hashes": filteredhash,
        }

    def _write_header(self, fp) -> None:
        """write the branch cache header to a file"""
        cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
        if self.key_hashes:
            cachekey.append(hex(self.key_hashes[0]))
        fp.write(b" ".join(cachekey) + b'\n')

    def _compute_key_hashes(self, repo) -> Tuple[bytes]:
        """return the cache key hashes that match this repoview state"""
        filtered_hash = scmutil.combined_filtered_and_obsolete_hash(
            repo,
            self.tiprev,
            needobsolete=True,
        )
        # empty tuple when there is nothing filtered/obsolete to hash
        keys: Tuple[bytes] = cast(Tuple[bytes], ())
        if filtered_hash is not None:
            keys: Tuple[bytes] = (filtered_hash,)
        return keys
798 801
class BranchCacheV3(_LocalBranchCache):
    """a branch cache using version 3 of the format on disk

    This version is still EXPERIMENTAL and the format is subject to changes.

    The cache is serialized on disk in the following format:

    <cache-key-xxx>=<xxx-value> <cache-key-yyy>=<yyy-value> […]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. It is a series
    of key value pair. The following key are recognized:

    - tip-rev: the rev-num of the tip-most revision seen by this cache
    - tip-node: the node-id of the tip-most revision sen by this cache
    - filtered-hash: the hash of all filtered revisions (before tip-rev)
      ignored by this cache.
    - obsolete-hash: the hash of all non-filtered obsolete revisions (before
      tip-rev) ignored by this cache.

    The tip-rev is used to know how far behind the value in the file are
    compared to the current repository state.

    The tip-node, filtered-hash and obsolete-hash are used to detect if this
    cache can be used for this repository state at all.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.

    Topological heads are not included in the listing and should be dispatched
    on the right branch at read time. Obsolete topological heads should be
    ignored.
    """

    _base_filename = b"branch3"
    # (filtered-hash, obsolete-hash); None means "not present in the file"
    _default_key_hashes = (None, None)

    def _get_topo_heads(self, repo) -> List[int]:
        """returns the topological head of a repoview content up to self.tiprev"""
        cl = repo.changelog
        if self.tiprev == nullrev:
            # empty cache: no heads at all
            return []
        elif self.tiprev == cl.tiprev():
            return cl.headrevs()
        else:
            # XXX passing tiprev as ceiling of cl.headrevs could be faster
            heads = cl.headrevs(cl.revs(stop=self.tiprev))
            return heads

    def _write_header(self, fp) -> None:
        """write the sorted key=value cache-key line"""
        cache_keys = {
            b"tip-node": hex(self.tipnode),
            b"tip-rev": b'%d' % self.tiprev,
        }
        if self.key_hashes:
            if self.key_hashes[0] is not None:
                cache_keys[b"filtered-hash"] = hex(self.key_hashes[0])
            if self.key_hashes[1] is not None:
                cache_keys[b"obsolete-hash"] = hex(self.key_hashes[1])
        pieces = (b"%s=%s" % i for i in sorted(cache_keys.items()))
        fp.write(b" ".join(pieces) + b'\n')

    def _write_heads(self, repo, fp) -> int:
        """write list of heads to a file

        Return the number of heads written."""
        nodecount = 0
        # topological heads are recomputed at load time, so skip them here
        topo_heads = set(self._get_topo_heads(repo))
        to_rev = repo.changelog.index.rev
        for label, nodes in sorted(self._entries.items()):
            label = encoding.fromlocal(label)
            for node in nodes:
                rev = to_rev(node)
                if rev in topo_heads:
                    continue
                if node in self._closednodes:
                    state = b'c'
                else:
                    state = b'o'
                nodecount += 1
                fp.write(b"%s %s %s\n" % (hex(node), state, label))
        return nodecount

    @classmethod
    def _load_header(cls, repo, lineiter):
        """parse the key=value header line into constructor kwargs

        Raises ValueError on an unknown key."""
        header_line = next(lineiter)
        pieces = header_line.rstrip(b'\n').split(b" ")
        cache_keys = dict(p.split(b'=', 1) for p in pieces)

        args = {}
        filtered_hash = None
        obsolete_hash = None
        for k, v in cache_keys.items():
            if k == b"tip-rev":
                args["tiprev"] = int(v)
            elif k == b"tip-node":
                args["tipnode"] = bin(v)
            elif k == b"filtered-hash":
                filtered_hash = bin(v)
            elif k == b"obsolete-hash":
                obsolete_hash = bin(v)
            else:
                msg = b"unknown cache key: %r" % k
                raise ValueError(msg)
        args["key_hashes"] = (filtered_hash, obsolete_hash)
        return args

    def _load_heads(self, repo, lineiter):
        """fully loads the branchcache by reading from the file using the line
        iterator passed"""
        super()._load_heads(repo, lineiter)
        cl = repo.changelog
        getbranchinfo = repo.revbranchcache().branchinfo
        obsrevs = obsolete.getrevs(repo, b'obsolete')
        to_node = cl.node
        touched_branch = set()
        # re-dispatch the topological heads that were omitted at write time
        for head in self._get_topo_heads(repo):
            if head in obsrevs:
                continue
            node = to_node(head)
            branch, closed = getbranchinfo(head)
            self._entries.setdefault(branch, []).append(node)
            if closed:
                self._closednodes.add(node)
            touched_branch.add(branch)
        to_rev = cl.index.rev
        # restore the revision ordering invariant for touched branches
        for branch in touched_branch:
            self._entries[branch].sort(key=to_rev)

    def _compute_key_hashes(self, repo) -> Tuple[bytes]:
        """return the cache key hashes that match this repoview state"""
        return scmutil.filtered_and_obsolete_hash(
            repo,
            self.tiprev,
        )
938 941
class remotebranchcache(_BaseBranchCache):
    """Branchmap information obtained from a remote peer.

    This cache is purely in-memory and must never be written to the local
    disk; it merely forwards its arguments to the base class.
    """

    def __init__(
        self,
        repo: "localrepo.localrepository",
        entries: Union[
            Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
        ] = (),
        closednodes: Optional[Set[bytes]] = None,
    ) -> None:
        # note the keyword rename: callers say ``closednodes`` but the base
        # class expects ``closed_nodes``
        super().__init__(entries=entries, closed_nodes=closednodes, repo=repo)
951 954
952 955
# Revision branch info cache

_rbcversion = b'-v1'  # on-disk format version, appended to both cache names
_rbcnames = b'rbc-names' + _rbcversion  # file of \0-separated branch names
_rbcrevs = b'rbc-revs' + _rbcversion  # file of fixed-size per-revision records
# record layout: [4 byte node-hash prefix][4 byte branch name index, with the
# top bit set when the revision is a branch-closing commit]
_rbcrecfmt = b'>4sI'  # big-endian: 4 raw bytes + unsigned 32-bit integer
_rbcrecsize = calcsize(_rbcrecfmt)  # byte size of one rbc-revs record
_rbcmininc = 64 * _rbcrecsize  # presumably a minimal growth step — unused in this chunk, TODO confirm
_rbcnodelen = 4  # number of node-hash bytes kept per record
_rbcbranchidxmask = 0x7FFFFFFF  # low 31 bits: index into the branch name list
_rbccloseflag = 0x80000000  # high bit: revision closes its branch
965 968
966 969
class rbcrevs:
    """a byte string consisting of an immutable prefix followed by a mutable suffix

    The prefix is the (possibly mmap'ed) on-disk data; new records accumulate
    in the mutable suffix.  Offsets passed to the methods below always refer
    to the logical concatenation of the two parts.
    """

    def __init__(self, revs):
        self._prefix = revs
        self._rest = bytearray()

    def __len__(self):
        return len(self._prefix) + len(self._rest)

    def unpack_record(self, rbcrevidx):
        """Decode and return the record starting at byte offset ``rbcrevidx``."""
        if rbcrevidx < len(self._prefix):
            return unpack_from(_rbcrecfmt, util.buffer(self._prefix), rbcrevidx)
        else:
            return unpack_from(
                _rbcrecfmt,
                util.buffer(self._rest),
                rbcrevidx - len(self._prefix),
            )

    def make_mutable(self):
        """Fold the immutable prefix into the mutable suffix so any byte can
        be rewritten in place."""
        if len(self._prefix) > 0:
            entirety = bytearray()
            entirety[:] = self._prefix
            entirety.extend(self._rest)
            self._rest = entirety
            self._prefix = bytearray()

    def truncate(self, pos):
        """Discard every byte from logical offset ``pos`` onwards."""
        self.make_mutable()
        del self._rest[pos:]

    def pack_into(self, rbcrevidx, node, branchidx):
        """Write one record at byte offset ``rbcrevidx``, growing the
        mutable suffix as needed."""
        if rbcrevidx < len(self._prefix):
            # the record lands inside the immutable part: make it writable
            self.make_mutable()
        buf = self._rest
        start_offset = rbcrevidx - len(self._prefix)
        end_offset = start_offset + _rbcrecsize

        if len(self._rest) < end_offset:
            # bytearray doesn't allocate extra space at least in Python 3.7.
            # When multiple changesets are added in a row, precise resize would
            # result in quadratic complexity. Overallocate to compensate by
            # using the classic doubling technique for dynamic arrays instead.
            # If there was a gap in the map before, less space will be reserved.
            self._rest.extend(b'\0' * end_offset)
        return pack_into(
            _rbcrecfmt,
            buf,
            start_offset,
            node,
            branchidx,
        )

    def extend(self, extension):
        """Append raw bytes to the mutable suffix."""
        return self._rest.extend(extension)

    def slice(self, begin, end):
        """Return bytes ``begin:end`` of the logical prefix+suffix string."""
        if begin < len(self._prefix):
            acc = bytearray()
            acc[:] = self._prefix[begin:end]
            # ``end - len(self._prefix)`` must be clamped at 0: a negative
            # slice bound would wrap around from the end of ``_rest`` and
            # return the wrong bytes whenever ``_rest`` is longer than the
            # skipped part of the prefix (and when ``end`` falls inside the
            # prefix, nothing of ``_rest`` must be taken at all).
            rest_end = max(0, end - len(self._prefix))
            acc.extend(self._rest[:rest_end])
            return acc
        return self._rest[begin - len(self._prefix) : end - len(self._prefix)]
1033 1036
1034 1037
class revbranchcache:
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs fill thus still give the right result
    while converging towards full recovery ... assuming no incorrectly matching
    node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        """Read the cache content from ``repo.cachevfs``.

        If the names file cannot be read and ``readonly`` is set, the cache
        bypasses itself by aliasing ``branchinfo`` to the uncached slow path.
        """
        assert repo.filtername is None
        self._repo = repo
        self._names = []  # branch names in local encoding with static index
        self._rbcrevs = rbcrevs(bytearray())
        self._rbcsnameslen = 0  # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata)  # for verification before writing
            if bndata:
                self._names = [
                    encoding.tolocal(bn) for bn in bndata.split(b'\0')
                ]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        # only bother reading rbc-revs when we have branch names to resolve
        # the record indexes against
        if self._names:
            try:
                if repo.ui.configbool(b'storage', b'revbranchcache.mmap'):
                    with repo.cachevfs(_rbcrevs) as fp:
                        data = util.buffer(util.mmapread(fp))
                else:
                    data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs = rbcrevs(data)
            except (IOError, OSError) as inst:
                repo.ui.debug(
                    b"couldn't read revision branch cache: %s\n"
                    % stringutil.forcebytestr(inst)
                )
        # remember number of good records on disk
        self._rbcrevslen = min(
            len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
        )
        if self._rbcrevslen == 0:
            self._names = []
        # number of names read at _rbcsnameslen
        self._rbcnamescount = len(self._names)

    def _clear(self):
        # forget everything read from disk and restart from a blank, fully
        # zeroed record area covering the whole changelog
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = rbcrevs(bytearray(self._rbcrevslen * _rbcrecsize))
        util.clearcachedproperty(self, b'_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        # reverse mapping: branch name -> index in self._names
        return {b: r for r, b in enumerate(self._names)}

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = self._rbcrevs.unpack_record(rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == b'\0\0\0\0':
            # all-zero record: slot was allocated but never filled in
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug(
                    b"referenced branch names not found"
                    b" - rebuilding revision branch cache from scratch\n"
                )
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug(
                b"history modification detected - truncating "
                b"revision branch cache to revision %d\n" % rev
            )
            truncate = rbcrevidx + _rbcrecsize
            self._rbcrevs.truncate(truncate)
            # NOTE(review): ``truncate`` is a byte offset while
            # ``_rbcrevslen`` counts records — mixing the units looks
            # suspicious, though any overestimate is later self-healed by
            # ``_writerevs`` rewriting from 0; confirm intent.
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first time this branch is seen: allocate a new name index
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, rev, changelogrevision):
        """add new data information to the cache"""
        branch, close = changelogrevision.branchinfo

        if branch in self._namesreverse:
            branchidx = self._namesreverse[branch]
        else:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, self._repo.changelog.node(rev), branchidx)
        # If no cache data were readable (non exists, bad permission, etc)
        # the cache was bypassing itself by setting:
        #
        # self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if 'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        self._rbcrevs.pack_into(rbcrevidx, node, branchidx)
        # mark everything from ``rev`` on as dirty so it gets written out
        self._rbcrevslen = min(self._rbcrevslen, rev)

        # schedule a write at transaction close, if one is running
        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize(b'write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = b''
        try:
            # write the new names
            if self._rbcnamescount < len(self._names):
                wlock = repo.wlock(wait=False)
                step = b' names'
                self._writenames(repo)

            # write the new revs
            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = b''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                self._writerevs(repo, start)

        except (IOError, OSError, error.Abort, error.LockError) as inst:
            # best-effort cache: failing to persist is only worth a debug note
            repo.ui.debug(
                b"couldn't write revision branch cache%s: %s\n"
                % (step, stringutil.forcebytestr(inst))
            )
        finally:
            if wlock is not None:
                wlock.release()

    def _writenames(self, repo):
        """write the new branch names to revbranchcache"""
        if self._rbcnamescount != 0:
            # append mode; verify the on-disk file still matches what we read
            f = repo.cachevfs.open(_rbcnames, b'ab')
            if f.tell() == self._rbcsnameslen:
                f.write(b'\0')
            else:
                f.close()
                repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
                self._rbcnamescount = 0
                self._rbcrevslen = 0
        if self._rbcnamescount == 0:
            # before rewriting names, make sure references are removed
            repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
            f = repo.cachevfs.open(_rbcnames, b'wb')
        f.write(
            b'\0'.join(
                encoding.fromlocal(b)
                for b in self._names[self._rbcnamescount :]
            )
        )
        self._rbcsnameslen = f.tell()
        f.close()
        self._rbcnamescount = len(self._names)

    def _writerevs(self, repo, start):
        """write the new revs to revbranchcache"""
        # never write more records than the changelog has revisions
        revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
        with repo.cachevfs.open(_rbcrevs, b'ab') as f:
            if f.tell() != start:
                repo.ui.debug(
                    b"truncating cache/%s to %d\n" % (_rbcrevs, start)
                )
                f.seek(start)
                if f.tell() != start:
                    # seek did not land where expected: rewrite from scratch
                    start = 0
                    f.seek(start)
                f.truncate()
            end = revs * _rbcrecsize
            f.write(self._rbcrevs.slice(start, end))
        self._rbcrevslen = revs
General Comments 0
You need to be logged in to leave comments. Login now