branchcache: move the processing of the new data in a dedicated method...
marmoute, r52428:03247e37 default
@@ -1,1277 +1,1299
1 1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import struct
10 10
11 11 from .node import (
12 12 bin,
13 13 hex,
14 14 nullrev,
15 15 )
16 16
17 17 from typing import (
18 18 Any,
19 19 Callable,
20 20 Dict,
21 21 Iterable,
22 22 List,
23 23 Optional,
24 24 Set,
25 25 TYPE_CHECKING,
26 26 Tuple,
27 27 Union,
28 28 cast,
29 29 )
30 30
31 31 from . import (
32 32 encoding,
33 33 error,
34 34 obsolete,
35 35 scmutil,
36 36 util,
37 37 )
38 38
39 39 from .utils import (
40 40 repoviewutil,
41 41 stringutil,
42 42 )
43 43
44 44 if TYPE_CHECKING:
45 45 from . import localrepo
46 46
47 47 assert [localrepo]
48 48
49 49 subsettable = repoviewutil.subsettable
50 50
51 51 calcsize = struct.calcsize
52 52 pack_into = struct.pack_into
53 53 unpack_from = struct.unpack_from
54 54
55 55
56 56 class BranchMapCache:
57 57 """mapping of filtered views of repo with their branchcache"""
58 58
59 59 def __init__(self):
60 60 self._per_filter = {}
61 61
62 62 def __getitem__(self, repo):
63 63 self.updatecache(repo)
64 64 bcache = self._per_filter[repo.filtername]
65 65 assert bcache._filtername == repo.filtername, (
66 66 bcache._filtername,
67 67 repo.filtername,
68 68 )
69 69 return bcache
70 70
71 71 def update_disk(self, repo):
72 72 """ensure an up-to-date cache is (or will be) written on disk
73 73
74 74 The cache for this repository view is updated if needed and written on
75 75 disk.
76 76
77 77 If a transaction is in progress, the write is scheduled for
78 78 transaction close. See the `BranchMapCache.write_dirty` method.
79 79
80 80 This method exists independently of __getitem__ as it is sometimes
81 81 useful to signal that we have no intent to use the data in memory yet.
82 82 """
83 83 self.updatecache(repo)
84 84 bcache = self._per_filter[repo.filtername]
85 85 assert bcache._filtername == repo.filtername, (
86 86 bcache._filtername,
87 87 repo.filtername,
88 88 )
89 89 tr = repo.currenttransaction()
90 90 if getattr(tr, 'finalized', True):
91 91 bcache.sync_disk(repo)
92 92
93 93 def updatecache(self, repo):
94 94 """Update the cache for the given filtered view on a repository"""
95 95 # This can trigger updates for the caches for subsets of the filtered
96 96 # view, e.g. when there is no cache for this filtered view or the cache
97 97 # is stale.
98 98
99 99 cl = repo.changelog
100 100 filtername = repo.filtername
101 101 bcache = self._per_filter.get(filtername)
102 102 if bcache is None or not bcache.validfor(repo):
103 103 # cache object missing or cache object stale? Read from disk
104 104 bcache = branch_cache_from_file(repo)
105 105
106 106 revs = []
107 107 if bcache is None:
108 108 # no (fresh) cache available anymore, perhaps we can re-use
109 109 # the cache for a subset, then extend that to add info on missing
110 110 # revisions.
111 111 subsetname = subsettable.get(filtername)
112 112 if subsetname is not None:
113 113 subset = repo.filtered(subsetname)
114 114 self.updatecache(subset)
115 115 bcache = self._per_filter[subset.filtername].inherit_for(repo)
116 116 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
117 117 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
118 118 else:
119 119 # nothing to fall back on, start empty.
120 120 bcache = new_branch_cache(repo)
121 121
122 122 revs.extend(cl.revs(start=bcache.tiprev + 1))
123 123 if revs:
124 124 bcache.update(repo, revs)
125 125
126 126 assert bcache.validfor(repo), filtername
127 127 self._per_filter[repo.filtername] = bcache
128 128
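
The fallback in updatecache() above can be pictured as a walk up a chain of broader repo views whose caches can seed this one before the missing revisions are replayed on top. A minimal standalone sketch; the mapping below is illustrative only, the real table is repoviewutil.subsettable:

    # Illustrative sketch (not the real table) of the subset-fallback walk
    # in updatecache(): each filter level names a broader level whose
    # cache may be reused as a starting point.
    subsettable = {
        b'visible': b'served',
        b'served': b'immutable',
        b'immutable': b'base',
    }

    def fallback_chain(filtername):
        """Yield successively broader filter levels whose cache we may reuse."""
        while filtername in subsettable:
            filtername = subsettable[filtername]
            yield filtername

    print(list(fallback_chain(b'visible')))  # [b'served', b'immutable', b'base']
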
129 129 def replace(self, repo, remotebranchmap):
130 130 """Replace the branchmap cache for a repo with a branch mapping.
131 131
132 132 This is likely only called during clone with a branch map from a
133 133 remote.
134 134
135 135 """
136 136 cl = repo.changelog
137 137 clrev = cl.rev
138 138 clbranchinfo = cl.branchinfo
139 139 rbheads = []
140 140 closed = set()
141 141 for bheads in remotebranchmap.values():
142 142 rbheads += bheads
143 143 for h in bheads:
144 144 r = clrev(h)
145 145 b, c = clbranchinfo(r)
146 146 if c:
147 147 closed.add(h)
148 148
149 149 if rbheads:
150 150 rtiprev = max((int(clrev(node)) for node in rbheads))
151 151 cache = new_branch_cache(
152 152 repo,
153 153 remotebranchmap,
154 154 repo[rtiprev].node(),
155 155 rtiprev,
156 156 closednodes=closed,
157 157 )
158 158
159 159 # Try to stick it as low as possible
160 160 # filters above served are unlikely to be fetched from a clone
161 161 for candidate in (b'base', b'immutable', b'served'):
162 162 rview = repo.filtered(candidate)
163 163 if cache.validfor(rview):
164 164 cache._filtername = candidate
165 165 self._per_filter[candidate] = cache
166 166 cache._state = STATE_DIRTY
167 167 cache.write(rview)
168 168 return
169 169
170 170 def clear(self):
171 171 self._per_filter.clear()
172 172
173 173 def write_dirty(self, repo):
174 174 unfi = repo.unfiltered()
175 175 for filtername in repoviewutil.get_ordered_subset():
176 176 cache = self._per_filter.get(filtername)
177 177 if cache is None:
178 178 continue
179 179 if filtername is None:
180 180 repo = unfi
181 181 else:
182 182 repo = unfi.filtered(filtername)
183 183 cache.sync_disk(repo)
184 184
185 185
186 186 def _unknownnode(node):
187 187 """raises ValueError when branchcache found a node which does not exist"""
188 188 raise ValueError('node %s does not exist' % node.hex())
189 189
190 190
191 191 def _branchcachedesc(repo):
192 192 if repo.filtername is not None:
193 193 return b'branch cache (%s)' % repo.filtername
194 194 else:
195 195 return b'branch cache'
196 196
197 197
198 198 class _BaseBranchCache:
199 199 """A dict-like object that holds the branch heads cache.
200 200
201 201 This cache is used to avoid costly computations to determine all the
202 202 branch heads of a repo.
203 203 """
204 204
205 205 def __init__(
206 206 self,
207 207 repo: "localrepo.localrepository",
208 208 entries: Union[
209 209 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
210 210 ] = (),
211 211 closed_nodes: Optional[Set[bytes]] = None,
212 212 ) -> None:
213 213 """entries is the initial branch-to-heads mapping and closed_nodes is
214 214 the set of nodes known to close their branch; this base class does not
215 215 verify nodes against the changelog."""
216 216 # closednodes is a set of nodes that close their branch. If the branch
217 217 # cache has been updated, it may contain nodes that are no longer
218 218 # heads.
219 219 if closed_nodes is None:
220 220 closed_nodes = set()
221 221 self._closednodes = set(closed_nodes)
222 222 self._entries = dict(entries)
223 223
224 224 def __iter__(self):
225 225 return iter(self._entries)
226 226
227 227 def __setitem__(self, key, value):
228 228 self._entries[key] = value
229 229
230 230 def __getitem__(self, key):
231 231 return self._entries[key]
232 232
233 233 def __contains__(self, key):
234 234 return key in self._entries
235 235
236 236 def iteritems(self):
237 237 return self._entries.items()
238 238
239 239 items = iteritems
240 240
241 241 def hasbranch(self, label):
242 242 """checks whether a branch of this name exists or not"""
243 243 return label in self._entries
244 244
245 245 def _branchtip(self, heads):
246 246 """Return a tuple of the last open head in heads and False;
247 247 otherwise return the last closed head and True."""
248 248 tip = heads[-1]
249 249 closed = True
250 250 for h in reversed(heads):
251 251 if h not in self._closednodes:
252 252 tip = h
253 253 closed = False
254 254 break
255 255 return tip, closed
256 256
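
A self-contained rendition of the _branchtip() scan above, assuming heads are already sorted with the tip-most head last:

    # Walk heads from the end and return the last open head; if every
    # head is closed, fall back to the last head and report the branch
    # as closed.
    def branchtip(heads, closednodes):
        tip, closed = heads[-1], True
        for h in reversed(heads):
            if h not in closednodes:
                tip, closed = h, False
                break
        return tip, closed

    assert branchtip([b'a', b'b', b'c'], closednodes={b'c'}) == (b'b', False)
    assert branchtip([b'a', b'b'], closednodes={b'a', b'b'}) == (b'b', True)
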
257 257 def branchtip(self, branch):
258 258 """Return the tipmost open head on branch head, otherwise return the
259 259 tipmost closed head on branch.
260 260 Raise KeyError for unknown branch."""
261 261 return self._branchtip(self[branch])[0]
262 262
263 263 def iteropen(self, nodes):
264 264 return (n for n in nodes if n not in self._closednodes)
265 265
266 266 def branchheads(self, branch, closed=False):
267 267 heads = self._entries[branch]
268 268 if not closed:
269 269 heads = list(self.iteropen(heads))
270 270 return heads
271 271
272 272 def iterbranches(self):
273 273 for bn, heads in self.items():
274 274 yield (bn, heads) + self._branchtip(heads)
275 275
276 276 def iterheads(self):
277 277 """returns all the heads"""
278 278 return self._entries.values()
279 279
280 280 def update(self, repo, revgen):
281 281 """Given a branchhead cache, self, that may have extra nodes or be
282 282 missing heads, and a generator of nodes that are strictly a superset of
283 283 the missing heads, this function updates self to be correct.
284 284 """
285 285 starttime = util.timer()
286 286 cl = repo.changelog
287 287 # Faster than using ctx.obsolete()
288 288 obsrevs = obsolete.getrevs(repo, b'obsolete')
289 289 # collect new branch entries
290 290 newbranches = {}
291 291 new_closed = set()
292 292 obs_ignored = set()
293 293 getbranchinfo = repo.revbranchcache().branchinfo
294 294 max_rev = -1
295 295 for r in revgen:
296 296 max_rev = max(max_rev, r)
297 297 if r in obsrevs:
298 298 # We ignore obsolete changesets as they shouldn't be
299 299 # considered heads.
300 300 obs_ignored.add(r)
301 301 continue
302 302 branch, closesbranch = getbranchinfo(r)
303 303 newbranches.setdefault(branch, []).append(r)
304 304 if closesbranch:
305 305 new_closed.add(r)
306 306 if max_rev < 0:
307 307 msg = "running branchcache.update without revision to update"
308 308 raise error.ProgrammingError(msg)
309 309
310 self._process_new(
311 repo,
312 newbranches,
313 new_closed,
314 obs_ignored,
315 max_rev,
316 )
317
318 self._closednodes.update(cl.node(rev) for rev in new_closed)
319
320 duration = util.timer() - starttime
321 repo.ui.log(
322 b'branchcache',
323 b'updated %s in %.4f seconds\n',
324 _branchcachedesc(repo),
325 duration,
326 )
327 return max_rev
328
329 def _process_new(
330 self,
331 repo,
332 newbranches,
333 new_closed,
334 obs_ignored,
335 max_rev,
336 ):
337 """update the branchmap from a set of new information"""
310 338 # Delay fetching the topological heads until they are needed.
311 339 # A repository without non-continuous branches can skip this part.
312 340 topoheads = None
313 341
342 cl = repo.changelog
343 getbranchinfo = repo.revbranchcache().branchinfo
344 # Faster than using ctx.obsolete()
345 obsrevs = obsolete.getrevs(repo, b'obsolete')
346
314 347 # If a changeset is visible, its parents must be visible too, so
315 348 # use the faster unfiltered parent accessor.
316 parentrevs = repo.unfiltered().changelog.parentrevs
349 parentrevs = cl._uncheckedparentrevs
317 350
318 351 for branch, newheadrevs in newbranches.items():
319 352 # For every branch, compute the new branchheads.
320 353 # A branchhead is a revision such that no descendant is on
321 354 # the same branch.
322 355 #
323 356 # The branchheads are computed iteratively in revision order.
324 357 # This ensures topological order, i.e. parents are processed
325 358 # before their children. Ancestors are inclusive here, i.e.
326 359 # any revision is an ancestor of itself.
327 360 #
328 361 # Core observations:
329 362 # - The current revision is always a branchhead for the
330 363 # repository up to that point.
331 364 # - It is the first revision of the branch if and only if
332 365 # there was no branchhead before. In that case, it is the
333 366 # only branchhead as there are no possible ancestors on
334 367 # the same branch.
335 368 # - If a parent is on the same branch, a branchhead can
336 369 # only be an ancestor of that parent if it is the parent
337 370 # itself. Otherwise it would have been removed as ancestor
338 371 # of that parent before.
339 372 # - Therefore, if all parents are on the same branch, they
340 373 # can just be removed from the branchhead set.
341 374 # - If one parent is on the same branch and the other is not
342 375 # and there was exactly one branchhead known, the existing
343 376 # branchhead can only be an ancestor if it is the parent.
344 377 # Otherwise it would have been removed as ancestor of
345 378 # the parent before. The other parent therefore can't have
346 379 # a branchhead as ancestor.
347 380 # - In all other cases, the parents on different branches
348 381 # could have a branchhead as ancestor. Those parents are
349 382 # kept in the "uncertain" set. If all branchheads are also
350 383 # topological heads, they can't have descendants and further
351 384 # checks can be skipped. Otherwise, the ancestors of the
352 385 # "uncertain" set are removed from branchheads.
353 386 # This computation is heavy and avoided if at all possible.
354 387 bheads = self._entries.get(branch, [])
355 388 bheadset = {cl.rev(node) for node in bheads}
356 389 uncertain = set()
357 390 for newrev in sorted(newheadrevs):
358 391 if not bheadset:
359 392 bheadset.add(newrev)
360 393 continue
361 394
362 395 parents = [p for p in parentrevs(newrev) if p != nullrev]
363 396 samebranch = set()
364 397 otherbranch = set()
365 398 obsparents = set()
366 399 for p in parents:
367 400 if p in obsrevs:
368 401 # We ignored this obsolete changeset earlier, but now
369 402 # that it has non-ignored children, we need to make
370 403 # sure their ancestors are not considered heads. To
371 404 # achieve that, we will simply treat this obsolete
372 405 # changeset as a parent from other branch.
373 406 obsparents.add(p)
374 407 elif p in bheadset or getbranchinfo(p)[0] == branch:
375 408 samebranch.add(p)
376 409 else:
377 410 otherbranch.add(p)
378 411 if not (len(bheadset) == len(samebranch) == 1):
379 412 uncertain.update(otherbranch)
380 413 uncertain.update(obsparents)
381 414 bheadset.difference_update(samebranch)
382 415 bheadset.add(newrev)
383 416
384 417 if uncertain:
385 418 if topoheads is None:
386 419 topoheads = set(cl.headrevs())
387 420 if bheadset - topoheads:
388 421 floorrev = min(bheadset)
389 422 if floorrev <= max(uncertain):
390 423 ancestors = set(cl.ancestors(uncertain, floorrev))
391 424 bheadset -= ancestors
392 425 if bheadset:
393 426 self[branch] = [cl.node(rev) for rev in sorted(bheadset)]
394 427
395 self._closednodes.update(cl.node(rev) for rev in new_closed)
396
397 duration = util.timer() - starttime
398 repo.ui.log(
399 b'branchcache',
400 b'updated %s in %.4f seconds\n',
401 _branchcachedesc(repo),
402 duration,
403 )
404 return max_rev
405
406 428
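
The core of the head-pruning loop described in the "Core observations" comments above, reduced to a toy example under simplifying assumptions: a single branch, no obsolete revisions, and no parents on other branches, so the "uncertain"/topoheads refinement never triggers:

    # parents maps each new revision to its parent revisions; revisions
    # are processed in numeric (hence topological) order.
    parents = {0: [], 1: [0], 2: [0], 3: [1]}

    bheadset = set()
    for newrev in sorted(parents):
        if not bheadset:
            bheadset.add(newrev)      # first revision of the branch
            continue
        # all parents are on the same branch here, so they simply stop
        # being heads once newrev is added
        bheadset.difference_update(parents[newrev])
        bheadset.add(newrev)

    print(sorted(bheadset))  # [2, 3]: revs 0 and 1 were pruned as ancestors
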
407 429 STATE_CLEAN = 1
408 430 STATE_INHERITED = 2
409 431 STATE_DIRTY = 3
410 432
411 433
412 434 class _LocalBranchCache(_BaseBranchCache):
413 435 """base class of branch-map info for a local repo or repoview"""
414 436
415 437 _base_filename = None
416 438 _default_key_hashes: Tuple[bytes] = cast(Tuple[bytes], ())
417 439
418 440 def __init__(
419 441 self,
420 442 repo: "localrepo.localrepository",
421 443 entries: Union[
422 444 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
423 445 ] = (),
424 446 tipnode: Optional[bytes] = None,
425 447 tiprev: Optional[int] = nullrev,
426 448 key_hashes: Optional[Tuple[bytes]] = None,
427 449 closednodes: Optional[Set[bytes]] = None,
428 450 hasnode: Optional[Callable[[bytes], bool]] = None,
429 451 verify_node: bool = False,
430 452 inherited: bool = False,
431 453 ) -> None:
432 454 """hasnode is a function which can be used to verify whether changelog
433 455 has a given node or not. If it's not provided, we assume that every node
434 456 we have exists in changelog"""
435 457 self._filtername = repo.filtername
436 458 if tipnode is None:
437 459 self.tipnode = repo.nullid
438 460 else:
439 461 self.tipnode = tipnode
440 462 self.tiprev = tiprev
441 463 if key_hashes is None:
442 464 self.key_hashes = self._default_key_hashes
443 465 else:
444 466 self.key_hashes = key_hashes
445 467 self._state = STATE_CLEAN
446 468 if inherited:
447 469 self._state = STATE_INHERITED
448 470
449 471 super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
450 472 # closednodes is a set of nodes that close their branch. If the branch
451 473 # cache has been updated, it may contain nodes that are no longer
452 474 # heads.
453 475
454 476 # Do we need to verify branch at all ?
455 477 self._verify_node = verify_node
456 478 # branches for which nodes are verified
457 479 self._verifiedbranches = set()
458 480 self._hasnode = None
459 481 if self._verify_node:
460 482 self._hasnode = repo.changelog.hasnode
461 483
462 484 def _compute_key_hashes(self, repo) -> Tuple[bytes]:
463 485 raise NotImplementedError
464 486
465 487 def validfor(self, repo):
466 488 """check that cache contents are valid for (a subset of) this repo
467 489
468 490 - False when the order of changesets changed or if we detect a strip.
469 491 - True when cache is up-to-date for the current repo or its subset."""
470 492 try:
471 493 node = repo.changelog.node(self.tiprev)
472 494 except IndexError:
473 495 # changesets were stripped and now we don't even have enough to
474 496 # find tiprev
475 497 return False
476 498 if self.tipnode != node:
477 499 # tiprev doesn't correspond to tipnode: repo was stripped, or this
478 500 # repo has a different order of changesets
479 501 return False
480 502 repo_key_hashes = self._compute_key_hashes(repo)
481 503 # hashes don't match if this repo view has a different set of filtered
482 504 # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
483 505 # history was rewritten)
484 506 return self.key_hashes == repo_key_hashes
485 507
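
A minimal sketch of the (tiprev, tipnode) part of validfor(), with a plain list standing in for cl.node(); the real method additionally compares the key hashes computed by _compute_key_hashes():

    def tip_still_matches(cache_tiprev, cache_tipnode, nodes):
        try:
            node = nodes[cache_tiprev]
        except IndexError:
            return False                  # history was stripped past tiprev
        return node == cache_tipnode

    nodes = [b'n0', b'n1', b'n2']
    assert tip_still_matches(2, b'n2', nodes)
    assert not tip_still_matches(2, b'XX', nodes)  # reordered or stripped
    assert not tip_still_matches(5, b'n5', nodes)  # tiprev no longer exists
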
486 508 @classmethod
487 509 def fromfile(cls, repo):
488 510 f = None
489 511 try:
490 512 f = repo.cachevfs(cls._filename(repo))
491 513 lineiter = iter(f)
492 514 init_kwargs = cls._load_header(repo, lineiter)
493 515 bcache = cls(
494 516 repo,
495 517 verify_node=True,
496 518 **init_kwargs,
497 519 )
498 520 if not bcache.validfor(repo):
499 521 # invalidate the cache
500 522 raise ValueError('tip differs')
501 523 bcache._load_heads(repo, lineiter)
502 524 except (IOError, OSError):
503 525 return None
504 526
505 527 except Exception as inst:
506 528 if repo.ui.debugflag:
507 529 msg = b'invalid %s: %s\n'
508 530 msg %= (
509 531 _branchcachedesc(repo),
510 532 stringutil.forcebytestr(inst),
511 533 )
512 534 repo.ui.debug(msg)
513 535 bcache = None
514 536
515 537 finally:
516 538 if f:
517 539 f.close()
518 540
519 541 return bcache
520 542
521 543 @classmethod
522 544 def _load_header(cls, repo, lineiter) -> "dict[str, Any]":
523 545 raise NotImplementedError
524 546
525 547 def _load_heads(self, repo, lineiter):
526 548 """fully loads the branchcache by reading from the file using the line
527 549 iterator passed"""
528 550 for line in lineiter:
529 551 line = line.rstrip(b'\n')
530 552 if not line:
531 553 continue
532 554 node, state, label = line.split(b" ", 2)
533 555 if state not in b'oc':
534 556 raise ValueError('invalid branch state')
535 557 label = encoding.tolocal(label.strip())
536 558 node = bin(node)
537 559 self._entries.setdefault(label, []).append(node)
538 560 if state == b'c':
539 561 self._closednodes.add(node)
540 562
541 563 @classmethod
542 564 def _filename(cls, repo):
543 565 """name of a branchcache file for a given repo or repoview"""
544 566 filename = cls._base_filename
545 567 assert filename is not None
546 568 if repo.filtername:
547 569 filename = b'%s-%s' % (filename, repo.filtername)
548 570 return filename
549 571
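
The per-view naming scheme of _filename() as a standalone helper: the unfiltered repo uses the bare base name and filtered views get a "-<filtername>" suffix ('served' is just an example filter name):

    def cache_filename(base, filtername):
        if filtername:
            return b'%s-%s' % (base, filtername)
        return base

    assert cache_filename(b'branch2', None) == b'branch2'
    assert cache_filename(b'branch2', b'served') == b'branch2-served'
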
550 572 def inherit_for(self, repo):
551 573 """return a copy of this branchcache, re-bound to the given repoview"""
552 574 assert repo.filtername != self._filtername
553 575 other = type(self)(
554 576 repo=repo,
555 577 # we always do a shallow copy of self._entries, and the values are
556 578 # always replaced, so there is no need to deepcopy as long as the
557 579 # above remains true.
558 580 entries=self._entries,
559 581 tipnode=self.tipnode,
560 582 tiprev=self.tiprev,
561 583 key_hashes=self.key_hashes,
562 584 closednodes=set(self._closednodes),
563 585 verify_node=self._verify_node,
564 586 inherited=True,
565 587 )
566 588 # also copy information about the current verification state
567 589 other._verifiedbranches = set(self._verifiedbranches)
568 590 return other
569 591
570 592 def sync_disk(self, repo):
571 593 """synchronise the on disk file with the cache state
572 594
573 595 If new values specific to this filter level need to be written, the
574 596 file will be updated; if the state of the branchcache is inherited from
575 597 a subset, any stale on-disk file will be deleted.
576 598
577 599 This method does nothing if there is nothing to do.
578 600 """
579 601 if self._state == STATE_DIRTY:
580 602 self.write(repo)
581 603 elif self._state == STATE_INHERITED:
582 604 filename = self._filename(repo)
583 605 repo.cachevfs.tryunlink(filename)
584 606
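
The disk policy of sync_disk() as a standalone decision table: dirty caches get written, inherited ones delete any stale file left for this filter level, and clean ones need no I/O at all:

    STATE_CLEAN, STATE_INHERITED, STATE_DIRTY = 1, 2, 3

    def disk_action(state):
        if state == STATE_DIRTY:
            return 'write'
        if state == STATE_INHERITED:
            return 'unlink-stale-file'
        return 'nothing'

    assert disk_action(STATE_DIRTY) == 'write'
    assert disk_action(STATE_INHERITED) == 'unlink-stale-file'
    assert disk_action(STATE_CLEAN) == 'nothing'
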
585 607 def write(self, repo):
586 608 assert self._filtername == repo.filtername, (
587 609 self._filtername,
588 610 repo.filtername,
589 611 )
590 612 assert self._state == STATE_DIRTY, self._state
591 613 # This method should not be called during an open transaction
592 614 tr = repo.currenttransaction()
593 615 if not getattr(tr, 'finalized', True):
594 616 msg = "writing branchcache in the middle of a transaction"
595 617 raise error.ProgrammingError(msg)
596 618 try:
597 619 filename = self._filename(repo)
598 620 with repo.cachevfs(filename, b"w", atomictemp=True) as f:
599 621 self._write_header(f)
600 622 nodecount = self._write_heads(repo, f)
601 623 repo.ui.log(
602 624 b'branchcache',
603 625 b'wrote %s with %d labels and %d nodes\n',
604 626 _branchcachedesc(repo),
605 627 len(self._entries),
606 628 nodecount,
607 629 )
608 630 self._state = STATE_CLEAN
609 631 except (IOError, OSError, error.Abort) as inst:
610 632 # Abort may be raised by read only opener, so log and continue
611 633 repo.ui.debug(
612 634 b"couldn't write branch cache: %s\n"
613 635 % stringutil.forcebytestr(inst)
614 636 )
615 637
616 638 def _write_header(self, fp) -> None:
617 639 raise NotImplementedError
618 640
619 641 def _write_heads(self, repo, fp) -> int:
620 642 """write list of heads to a file
621 643
622 644 Return the number of heads written."""
623 645 nodecount = 0
624 646 for label, nodes in sorted(self._entries.items()):
625 647 label = encoding.fromlocal(label)
626 648 for node in nodes:
627 649 nodecount += 1
628 650 if node in self._closednodes:
629 651 state = b'c'
630 652 else:
631 653 state = b'o'
632 654 fp.write(b"%s %s %s\n" % (hex(node), state, label))
633 655 return nodecount
634 656
635 657 def _verifybranch(self, branch):
636 658 """verify head nodes for the given branch."""
637 659 if not self._verify_node:
638 660 return
639 661 if branch not in self._entries or branch in self._verifiedbranches:
640 662 return
641 663 assert self._hasnode is not None
642 664 for n in self._entries[branch]:
643 665 if not self._hasnode(n):
644 666 _unknownnode(n)
645 667
646 668 self._verifiedbranches.add(branch)
647 669
648 670 def _verifyall(self):
649 671 """verifies nodes of all the branches"""
650 672 for b in self._entries.keys():
651 673 if b not in self._verifiedbranches:
652 674 self._verifybranch(b)
653 675
654 676 def __getitem__(self, key):
655 677 self._verifybranch(key)
656 678 return super().__getitem__(key)
657 679
658 680 def __contains__(self, key):
659 681 self._verifybranch(key)
660 682 return super().__contains__(key)
661 683
662 684 def iteritems(self):
663 685 self._verifyall()
664 686 return super().iteritems()
665 687
666 688 items = iteritems
667 689
668 690 def iterheads(self):
669 691 """returns all the heads"""
670 692 self._verifyall()
671 693 return super().iterheads()
672 694
673 695 def hasbranch(self, label):
674 696 """checks whether a branch of this name exists or not"""
675 697 self._verifybranch(label)
676 698 return super().hasbranch(label)
677 699
678 700 def branchheads(self, branch, closed=False):
679 701 self._verifybranch(branch)
680 702 return super().branchheads(branch, closed=closed)
681 703
682 704 def update(self, repo, revgen):
683 705 assert self._filtername == repo.filtername, (
684 706 self._filtername,
685 707 repo.filtername,
686 708 )
687 709 cl = repo.changelog
688 710 max_rev = super().update(repo, revgen)
689 711 # new tip revision which we found after iterating items from new
690 712 # branches
691 713 if max_rev is not None and max_rev > self.tiprev:
692 714 self.tiprev = max_rev
693 715 self.tipnode = cl.node(max_rev)
694 716 else:
695 717 # We should not be here if this is false
696 718 assert cl.node(self.tiprev) == self.tipnode
697 719
698 720 if not self.validfor(repo):
699 721 # the tiprev and tipnode should be aligned, so if the current repo
700 722 # is not seen as valid, this is because the old cache key is now
701 723 # invalid for the repo.
702 724 #
703 725 # However, we've just updated the cache and we assume it's valid,
704 726 # so let's make the cache key valid as well by recomputing it from
705 727 # the cached data
706 728 self.key_hashes = self._compute_key_hashes(repo)
707 729 self.filteredhash = scmutil.combined_filtered_and_obsolete_hash(
708 730 repo,
709 731 self.tiprev,
710 732 )
711 733
712 734 self._state = STATE_DIRTY
713 735 tr = repo.currenttransaction()
714 736 if getattr(tr, 'finalized', True):
715 737 # Avoid premature writing.
716 738 #
717 739 # (The cache warming setup by localrepo will update the file later.)
718 740 self.write(repo)
719 741
720 742
721 743 def branch_cache_from_file(repo) -> Optional[_LocalBranchCache]:
722 744 """Build a branch cache from on-disk data if possible
723 745
724 746 Return a branch cache of the right format depending on the repository.
725 747 """
726 748 if repo.ui.configbool(b"experimental", b"branch-cache-v3"):
727 749 return BranchCacheV3.fromfile(repo)
728 750 else:
729 751 return BranchCacheV2.fromfile(repo)
730 752
731 753
732 754 def new_branch_cache(repo, *args, **kwargs):
733 755 """Build a new branch cache from arguments
734 756
735 757 Return a branch cache of the right format depending on the repository.
736 758 """
737 759 if repo.ui.configbool(b"experimental", b"branch-cache-v3"):
738 760 return BranchCacheV3(repo, *args, **kwargs)
739 761 else:
740 762 return BranchCacheV2(repo, *args, **kwargs)
741 763
742 764
743 765 class BranchCacheV2(_LocalBranchCache):
744 766 """a branch cache using version 2 of the format on disk
745 767
746 768 The cache is serialized on disk in the following format:
747 769
748 770 <tip hex node> <tip rev number> [optional filtered repo hex hash]
749 771 <branch head hex node> <open/closed state> <branch name>
750 772 <branch head hex node> <open/closed state> <branch name>
751 773 ...
752 774
753 775 The first line is used to check if the cache is still valid. If the
754 776 branch cache is for a filtered repo view, an optional third hash is
755 777 included that hashes the hashes of all filtered and obsolete revisions.
756 778
757 779 The open/closed state is represented by a single letter 'o' or 'c'.
758 780 This field can be used to avoid changelog reads when determining if a
759 781 branch head closes a branch or not.
760 782 """
761 783
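
A worked example of parsing the v2 layout documented above; the hex fields are shortened placeholders rather than real 40-character node hashes:

    data = b'aa11 12 ff33\naa11 o default\nbb22 c stable\n'
    lines = iter(data.splitlines())

    cachekey = next(lines).split(b' ', 2)
    tipnode, tiprev = cachekey[0], int(cachekey[1])   # b'aa11', 12

    heads, closed = {}, set()
    for line in lines:
        node, state, label = line.split(b' ', 2)
        heads.setdefault(label, []).append(node)
        if state == b'c':
            closed.add(node)

    assert heads == {b'default': [b'aa11'], b'stable': [b'bb22']}
    assert closed == {b'bb22'}
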
762 784 _base_filename = b"branch2"
763 785
764 786 @classmethod
765 787 def _load_header(cls, repo, lineiter) -> "dict[str, Any]":
766 788 """parse the head of a branchmap file
767 789
768 790 return parameters to pass to a newly created class instance.
769 791 """
770 792 cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
771 793 last, lrev = cachekey[:2]
772 794 last, lrev = bin(last), int(lrev)
773 795 filteredhash = ()
774 796 if len(cachekey) > 2:
775 797 filteredhash = (bin(cachekey[2]),)
776 798 return {
777 799 "tipnode": last,
778 800 "tiprev": lrev,
779 801 "key_hashes": filteredhash,
780 802 }
781 803
782 804 def _write_header(self, fp) -> None:
783 805 """write the branch cache header to a file"""
784 806 cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
785 807 if self.key_hashes:
786 808 cachekey.append(hex(self.key_hashes[0]))
787 809 fp.write(b" ".join(cachekey) + b'\n')
788 810
789 811 def _compute_key_hashes(self, repo) -> Tuple[bytes]:
790 812 """return the cache key hashes that match this repoview state"""
791 813 filtered_hash = scmutil.combined_filtered_and_obsolete_hash(
792 814 repo,
793 815 self.tiprev,
794 816 needobsolete=True,
795 817 )
796 818 keys: Tuple[bytes] = cast(Tuple[bytes], ())
797 819 if filtered_hash is not None:
798 820 keys: Tuple[bytes] = (filtered_hash,)
799 821 return keys
800 822
801 823
802 824 class BranchCacheV3(_LocalBranchCache):
803 825 """a branch cache using version 3 of the format on disk
804 826
805 827 This version is still EXPERIMENTAL and the format is subject to changes.
806 828
807 829 The cache is serialized on disk in the following format:
808 830
809 831 <cache-key-xxx>=<xxx-value> <cache-key-yyy>=<yyy-value> […]
810 832 <branch head hex node> <open/closed state> <branch name>
811 833 <branch head hex node> <open/closed state> <branch name>
812 834 ...
813 835
814 836 The first line is used to check if the cache is still valid. It is a
815 837 series of key-value pairs. The following keys are recognized:
816 838
817 839 - tip-rev: the rev-num of the tip-most revision seen by this cache
818 840 - tip-node: the node-id of the tip-most revision seen by this cache
819 841 - filtered-hash: the hash of all filtered revisions (before tip-rev)
820 842 ignored by this cache.
821 843 - obsolete-hash: the hash of all non-filtered obsolete revisions (before
822 844 tip-rev) ignored by this cache.
823 845
824 846 The tip-rev is used to know how far behind the values in the file are
825 847 compared to the current repository state.
826 848
827 849 The tip-node, filtered-hash and obsolete-hash are used to detect if this
828 850 cache can be used for this repository state at all.
829 851
830 852 The open/closed state is represented by a single letter 'o' or 'c'.
831 853 This field can be used to avoid changelog reads when determining if a
832 854 branch head closes a branch or not.
833 855
834 856 Topological heads are not included in the listing and should be dispatched
835 857 on the right branch at read time. Obsolete topological heads should be
836 858 ignored.
837 859 """
838 860
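
A worked example of the v3 header line described above (hash values shortened for readability):

    header = b'filtered-hash=ab12 tip-node=cd34 tip-rev=42\n'
    pieces = header.rstrip(b'\n').split(b' ')
    cache_keys = dict(p.split(b'=', 1) for p in pieces)

    assert int(cache_keys[b'tip-rev']) == 42
    assert cache_keys[b'tip-node'] == b'cd34'
    assert cache_keys[b'filtered-hash'] == b'ab12'
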
839 861 _base_filename = b"branch3"
840 862 _default_key_hashes = (None, None)
841 863
842 864 def _get_topo_heads(self, repo) -> List[int]:
843 865 """returns the topological heads of a repoview content up to self.tiprev"""
844 866 cl = repo.changelog
845 867 if self.tiprev == nullrev:
846 868 return []
847 869 elif self.tiprev == cl.tiprev():
848 870 return cl.headrevs()
849 871 else:
850 872 # XXX passing tiprev as ceiling of cl.headrevs could be faster
851 873 heads = cl.headrevs(cl.revs(stop=self.tiprev))
852 874 return heads
853 875
854 876 def _write_header(self, fp) -> None:
855 877 cache_keys = {
856 878 b"tip-node": hex(self.tipnode),
857 879 b"tip-rev": b'%d' % self.tiprev,
858 880 }
859 881 if self.key_hashes:
860 882 if self.key_hashes[0] is not None:
861 883 cache_keys[b"filtered-hash"] = hex(self.key_hashes[0])
862 884 if self.key_hashes[1] is not None:
863 885 cache_keys[b"obsolete-hash"] = hex(self.key_hashes[1])
864 886 pieces = (b"%s=%s" % i for i in sorted(cache_keys.items()))
865 887 fp.write(b" ".join(pieces) + b'\n')
866 888
867 889 def _write_heads(self, repo, fp) -> int:
868 890 """write list of heads to a file
869 891
870 892 Return the number of heads written."""
871 893 nodecount = 0
872 894 topo_heads = set(self._get_topo_heads(repo))
873 895 to_rev = repo.changelog.index.rev
874 896 for label, nodes in sorted(self._entries.items()):
875 897 label = encoding.fromlocal(label)
876 898 for node in nodes:
877 899 rev = to_rev(node)
878 900 if rev in topo_heads:
879 901 continue
880 902 if node in self._closednodes:
881 903 state = b'c'
882 904 else:
883 905 state = b'o'
884 906 nodecount += 1
885 907 fp.write(b"%s %s %s\n" % (hex(node), state, label))
886 908 return nodecount
887 909
888 910 @classmethod
889 911 def _load_header(cls, repo, lineiter):
890 912 header_line = next(lineiter)
891 913 pieces = header_line.rstrip(b'\n').split(b" ")
892 914 cache_keys = dict(p.split(b'=', 1) for p in pieces)
893 915
894 916 args = {}
895 917 filtered_hash = None
896 918 obsolete_hash = None
897 919 for k, v in cache_keys.items():
898 920 if k == b"tip-rev":
899 921 args["tiprev"] = int(v)
900 922 elif k == b"tip-node":
901 923 args["tipnode"] = bin(v)
902 924 elif k == b"filtered-hash":
903 925 filtered_hash = bin(v)
904 926 elif k == b"obsolete-hash":
905 927 obsolete_hash = bin(v)
906 928 else:
907 929 msg = b"unknown cache key: %r" % k
908 930 raise ValueError(msg)
909 931 args["key_hashes"] = (filtered_hash, obsolete_hash)
910 932 return args
911 933
912 934 def _load_heads(self, repo, lineiter):
913 935 """fully loads the branchcache by reading from the file using the line
914 936 iterator passed"""
915 937 super()._load_heads(repo, lineiter)
916 938 cl = repo.changelog
917 939 getbranchinfo = repo.revbranchcache().branchinfo
918 940 obsrevs = obsolete.getrevs(repo, b'obsolete')
919 941 to_node = cl.node
920 942 touched_branch = set()
921 943 for head in self._get_topo_heads(repo):
922 944 if head in obsrevs:
923 945 continue
924 946 node = to_node(head)
925 947 branch, closed = getbranchinfo(head)
926 948 self._entries.setdefault(branch, []).append(node)
927 949 if closed:
928 950 self._closednodes.add(node)
929 951 touched_branch.add(branch)
930 952 to_rev = cl.index.rev
931 953 for branch in touched_branch:
932 954 self._entries[branch].sort(key=to_rev)
933 955
934 956 def _compute_key_hashes(self, repo) -> Tuple[bytes]:
935 957 """return the cache key hashes that match this repoview state"""
936 958 return scmutil.filtered_and_obsolete_hash(
937 959 repo,
938 960 self.tiprev,
939 961 )
940 962
941 963
942 964 class remotebranchcache(_BaseBranchCache):
943 965 """Branchmap info for a remote connection, should not write locally"""
944 966
945 967 def __init__(
946 968 self,
947 969 repo: "localrepo.localrepository",
948 970 entries: Union[
949 971 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
950 972 ] = (),
951 973 closednodes: Optional[Set[bytes]] = None,
952 974 ) -> None:
953 975 super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
954 976
955 977
956 978 # Revision branch info cache
957 979
958 980 _rbcversion = b'-v1'
959 981 _rbcnames = b'rbc-names' + _rbcversion
960 982 _rbcrevs = b'rbc-revs' + _rbcversion
961 983 # [4 byte hash prefix][4 byte branch name number with sign bit indicating close]
962 984 _rbcrecfmt = b'>4sI'
963 985 _rbcrecsize = calcsize(_rbcrecfmt)
964 986 _rbcmininc = 64 * _rbcrecsize
965 987 _rbcnodelen = 4
966 988 _rbcbranchidxmask = 0x7FFFFFFF
967 989 _rbccloseflag = 0x80000000
968 990
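
One rbc-revs record, packed and unpacked to show the format defined above: a 4-byte node-hash prefix plus a big-endian uint32 whose top bit marks a branch-closing changeset:

    import struct

    _rbcrecfmt = b'>4sI'                 # 4-byte node prefix + uint32
    _rbcrecsize = struct.calcsize(_rbcrecfmt)
    _rbccloseflag = 0x80000000
    _rbcbranchidxmask = 0x7FFFFFFF

    record = struct.pack(_rbcrecfmt, b'\xde\xad\xbe\xef', 5 | _rbccloseflag)
    assert _rbcrecsize == len(record) == 8

    node_prefix, raw = struct.unpack(_rbcrecfmt, record)
    assert node_prefix == b'\xde\xad\xbe\xef'
    assert bool(raw & _rbccloseflag)        # top bit: closes its branch
    assert raw & _rbcbranchidxmask == 5     # remaining 31 bits: name index
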
969 991
970 992 class rbcrevs:
971 993 """a byte string consisting of an immutable prefix followed by a mutable suffix"""
972 994
973 995 def __init__(self, revs):
974 996 self._prefix = revs
975 997 self._rest = bytearray()
976 998
977 999 def __len__(self):
978 1000 return len(self._prefix) + len(self._rest)
979 1001
980 1002 def unpack_record(self, rbcrevidx):
981 1003 if rbcrevidx < len(self._prefix):
982 1004 return unpack_from(_rbcrecfmt, util.buffer(self._prefix), rbcrevidx)
983 1005 else:
984 1006 return unpack_from(
985 1007 _rbcrecfmt,
986 1008 util.buffer(self._rest),
987 1009 rbcrevidx - len(self._prefix),
988 1010 )
989 1011
990 1012 def make_mutable(self):
991 1013 if len(self._prefix) > 0:
992 1014 entirety = bytearray()
993 1015 entirety[:] = self._prefix
994 1016 entirety.extend(self._rest)
995 1017 self._rest = entirety
996 1018 self._prefix = bytearray()
997 1019
998 1020 def truncate(self, pos):
999 1021 self.make_mutable()
1000 1022 del self._rest[pos:]
1001 1023
1002 1024 def pack_into(self, rbcrevidx, node, branchidx):
1003 1025 if rbcrevidx < len(self._prefix):
1004 1026 self.make_mutable()
1005 1027 buf = self._rest
1006 1028 start_offset = rbcrevidx - len(self._prefix)
1007 1029 end_offset = start_offset + _rbcrecsize
1008 1030
1009 1031 if len(self._rest) < end_offset:
1010 1032 # bytearray doesn't allocate extra space at least in Python 3.7.
1011 1033 # When multiple changesets are added in a row, precise resize would
1012 1034 # result in quadratic complexity. Overallocate to compensate by
1013 1035 # using the classic doubling technique for dynamic arrays instead.
1014 1036 # If there was a gap in the map before, less space will be reserved.
1015 1037 self._rest.extend(b'\0' * end_offset)
1016 1038 return pack_into(
1017 1039 _rbcrecfmt,
1018 1040 buf,
1019 1041 start_offset,
1020 1042 node,
1021 1043 branchidx,
1022 1044 )
1023 1045
1024 1046 def extend(self, extension):
1025 1047 return self._rest.extend(extension)
1026 1048
1027 1049 def slice(self, begin, end):
1028 1050 if begin < len(self._prefix):
1029 1051 acc = bytearray()
1030 1052 acc[:] = self._prefix[begin:end]
1031 1053 acc.extend(
1032 1054 self._rest[begin - len(self._prefix) : end - len(self._prefix)]
1033 1055 )
1034 1056 return acc
1035 1057 return self._rest[begin - len(self._prefix) : end - len(self._prefix)]
1036 1058
1037 1059
1038 1060 class revbranchcache:
1039 1061 """Persistent cache, mapping from revision number to branch name and close flag.
1040 1062 This is a low level cache, independent of filtering.
1041 1063
1042 1064 Branch names are stored in rbc-names in internal encoding separated by 0.
1043 1065 rbc-names is append-only, and each branch name is only stored once and will
1044 1066 thus have a unique index.
1045 1067
1046 1068 The branch info for each revision is stored in rbc-revs as constant size
1047 1069 records. The whole file is read into memory, but it is only 'parsed' on
1048 1070 demand. The file is usually append-only but will be truncated if repo
1049 1071 modification is detected.
1050 1072 The record for each revision contains the first 4 bytes of the
1051 1073 corresponding node hash, and the record is only used if it still matches.
1052 1074 Even a completely trashed rbc-revs file will thus still give the right result
1053 1075 while converging towards full recovery ... assuming no incorrectly matching
1054 1076 node hashes.
1055 1077 The record also contains 4 bytes where 31 bits contain the index of the
1056 1078 branch and the last bit indicates that it is a branch close commit.
1057 1079 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
1058 1080 and will grow with it but be 1/8th of its size.
1059 1081 """
1060 1082
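
The arithmetic behind the docstring's "1/8th" figure: each record is 8 bytes and is addressed by rev * _rbcrecsize, while a classic revlog index entry (as in 00changelog.i) occupies 64 bytes per revision:

    import struct

    _rbcrecsize = struct.calcsize(b'>4sI')
    assert _rbcrecsize == 8

    rev = 1000
    assert rev * _rbcrecsize == 8000        # byte offset of rev's record
    assert _rbcrecsize * 8 == 64            # one-eighth of an index entry
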
1061 1083 def __init__(self, repo, readonly=True):
1062 1084 assert repo.filtername is None
1063 1085 self._repo = repo
1064 1086 self._names = [] # branch names in local encoding with static index
1065 1087 self._rbcrevs = rbcrevs(bytearray())
1066 1088 self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
1067 1089 try:
1068 1090 bndata = repo.cachevfs.read(_rbcnames)
1069 1091 self._rbcsnameslen = len(bndata) # for verification before writing
1070 1092 if bndata:
1071 1093 self._names = [
1072 1094 encoding.tolocal(bn) for bn in bndata.split(b'\0')
1073 1095 ]
1074 1096 except (IOError, OSError):
1075 1097 if readonly:
1076 1098 # don't try to use cache - fall back to the slow path
1077 1099 self.branchinfo = self._branchinfo
1078 1100
1079 1101 if self._names:
1080 1102 try:
1081 1103 if repo.ui.configbool(b'storage', b'revbranchcache.mmap'):
1082 1104 with repo.cachevfs(_rbcrevs) as fp:
1083 1105 data = util.buffer(util.mmapread(fp))
1084 1106 else:
1085 1107 data = repo.cachevfs.read(_rbcrevs)
1086 1108 self._rbcrevs = rbcrevs(data)
1087 1109 except (IOError, OSError) as inst:
1088 1110 repo.ui.debug(
1089 1111 b"couldn't read revision branch cache: %s\n"
1090 1112 % stringutil.forcebytestr(inst)
1091 1113 )
1092 1114 # remember number of good records on disk
1093 1115 self._rbcrevslen = min(
1094 1116 len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
1095 1117 )
1096 1118 if self._rbcrevslen == 0:
1097 1119 self._names = []
1098 1120 self._rbcnamescount = len(self._names) # number of names read at
1099 1121 # _rbcsnameslen
1100 1122
1101 1123 def _clear(self):
1102 1124 self._rbcsnameslen = 0
1103 1125 del self._names[:]
1104 1126 self._rbcnamescount = 0
1105 1127 self._rbcrevslen = len(self._repo.changelog)
1106 1128 self._rbcrevs = rbcrevs(bytearray(self._rbcrevslen * _rbcrecsize))
1107 1129 util.clearcachedproperty(self, b'_namesreverse')
1108 1130
1109 1131 @util.propertycache
1110 1132 def _namesreverse(self):
1111 1133 return {b: r for r, b in enumerate(self._names)}
1112 1134
1113 1135 def branchinfo(self, rev):
1114 1136 """Return branch name and close flag for rev, using and updating
1115 1137 persistent cache."""
1116 1138 changelog = self._repo.changelog
1117 1139 rbcrevidx = rev * _rbcrecsize
1118 1140
1119 1141 # avoid negative index, changelog.read(nullrev) is fast without cache
1120 1142 if rev == nullrev:
1121 1143 return changelog.branchinfo(rev)
1122 1144
1123 1145 # if requested rev isn't allocated, grow and cache the rev info
1124 1146 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
1125 1147 return self._branchinfo(rev)
1126 1148
1127 1149 # fast path: extract data from cache, use it if node is matching
1128 1150 reponode = changelog.node(rev)[:_rbcnodelen]
1129 1151 cachenode, branchidx = self._rbcrevs.unpack_record(rbcrevidx)
1130 1152 close = bool(branchidx & _rbccloseflag)
1131 1153 if close:
1132 1154 branchidx &= _rbcbranchidxmask
1133 1155 if cachenode == b'\0\0\0\0':
1134 1156 pass
1135 1157 elif cachenode == reponode:
1136 1158 try:
1137 1159 return self._names[branchidx], close
1138 1160 except IndexError:
1139 1161 # recover from invalid reference to unknown branch
1140 1162 self._repo.ui.debug(
1141 1163 b"referenced branch names not found"
1142 1164 b" - rebuilding revision branch cache from scratch\n"
1143 1165 )
1144 1166 self._clear()
1145 1167 else:
1146 1168 # rev/node map has changed, invalidate the cache from here up
1147 1169 self._repo.ui.debug(
1148 1170 b"history modification detected - truncating "
1149 1171 b"revision branch cache to revision %d\n" % rev
1150 1172 )
1151 1173 truncate = rbcrevidx + _rbcrecsize
1152 1174 self._rbcrevs.truncate(truncate)
1153 1175 self._rbcrevslen = min(self._rbcrevslen, truncate)
1154 1176
1155 1177 # fall back to slow path and make sure it will be written to disk
1156 1178 return self._branchinfo(rev)
1157 1179
1158 1180 def _branchinfo(self, rev):
1159 1181 """Retrieve branch info from changelog and update _rbcrevs"""
1160 1182 changelog = self._repo.changelog
1161 1183 b, close = changelog.branchinfo(rev)
1162 1184 if b in self._namesreverse:
1163 1185 branchidx = self._namesreverse[b]
1164 1186 else:
1165 1187 branchidx = len(self._names)
1166 1188 self._names.append(b)
1167 1189 self._namesreverse[b] = branchidx
1168 1190 reponode = changelog.node(rev)
1169 1191 if close:
1170 1192 branchidx |= _rbccloseflag
1171 1193 self._setcachedata(rev, reponode, branchidx)
1172 1194 return b, close
1173 1195
1174 1196 def setdata(self, rev, changelogrevision):
1175 1197 """add new data information to the cache"""
1176 1198 branch, close = changelogrevision.branchinfo
1177 1199
1178 1200 if branch in self._namesreverse:
1179 1201 branchidx = self._namesreverse[branch]
1180 1202 else:
1181 1203 branchidx = len(self._names)
1182 1204 self._names.append(branch)
1183 1205 self._namesreverse[branch] = branchidx
1184 1206 if close:
1185 1207 branchidx |= _rbccloseflag
1186 1208 self._setcachedata(rev, self._repo.changelog.node(rev), branchidx)
1187 1209 # If no cache data were readable (none exists, bad permissions, etc.)
1188 1210 # the cache was bypassing itself by setting:
1189 1211 #
1190 1212 # self.branchinfo = self._branchinfo
1191 1213 #
1192 1214 # Since we now have data in the cache, we need to drop this bypassing.
1193 1215 if 'branchinfo' in vars(self):
1194 1216 del self.branchinfo
1195 1217
1196 1218 def _setcachedata(self, rev, node, branchidx):
1197 1219 """Writes the node's branch data to the in-memory cache data."""
1198 1220 if rev == nullrev:
1199 1221 return
1200 1222 rbcrevidx = rev * _rbcrecsize
1201 1223 self._rbcrevs.pack_into(rbcrevidx, node, branchidx)
1202 1224 self._rbcrevslen = min(self._rbcrevslen, rev)
1203 1225
1204 1226 tr = self._repo.currenttransaction()
1205 1227 if tr:
1206 1228 tr.addfinalize(b'write-revbranchcache', self.write)
1207 1229
1208 1230 def write(self, tr=None):
1209 1231 """Save branch cache if it is dirty."""
1210 1232 repo = self._repo
1211 1233 wlock = None
1212 1234 step = b''
1213 1235 try:
1214 1236 # write the new names
1215 1237 if self._rbcnamescount < len(self._names):
1216 1238 wlock = repo.wlock(wait=False)
1217 1239 step = b' names'
1218 1240 self._writenames(repo)
1219 1241
1220 1242 # write the new revs
1221 1243 start = self._rbcrevslen * _rbcrecsize
1222 1244 if start != len(self._rbcrevs):
1223 1245 step = b''
1224 1246 if wlock is None:
1225 1247 wlock = repo.wlock(wait=False)
1226 1248 self._writerevs(repo, start)
1227 1249
1228 1250 except (IOError, OSError, error.Abort, error.LockError) as inst:
1229 1251 repo.ui.debug(
1230 1252 b"couldn't write revision branch cache%s: %s\n"
1231 1253 % (step, stringutil.forcebytestr(inst))
1232 1254 )
1233 1255 finally:
1234 1256 if wlock is not None:
1235 1257 wlock.release()
1236 1258
1237 1259 def _writenames(self, repo):
1238 1260 """write the new branch names to revbranchcache"""
1239 1261 if self._rbcnamescount != 0:
1240 1262 f = repo.cachevfs.open(_rbcnames, b'ab')
1241 1263 if f.tell() == self._rbcsnameslen:
1242 1264 f.write(b'\0')
1243 1265 else:
1244 1266 f.close()
1245 1267 repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
1246 1268 self._rbcnamescount = 0
1247 1269 self._rbcrevslen = 0
1248 1270 if self._rbcnamescount == 0:
1249 1271 # before rewriting names, make sure references are removed
1250 1272 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
1251 1273 f = repo.cachevfs.open(_rbcnames, b'wb')
1252 1274 f.write(
1253 1275 b'\0'.join(
1254 1276 encoding.fromlocal(b)
1255 1277 for b in self._names[self._rbcnamescount :]
1256 1278 )
1257 1279 )
1258 1280 self._rbcsnameslen = f.tell()
1259 1281 f.close()
1260 1282 self._rbcnamescount = len(self._names)
1261 1283
1262 1284 def _writerevs(self, repo, start):
1263 1285 """write the new revs to revbranchcache"""
1264 1286 revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
1265 1287 with repo.cachevfs.open(_rbcrevs, b'ab') as f:
1266 1288 if f.tell() != start:
1267 1289 repo.ui.debug(
1268 1290 b"truncating cache/%s to %d\n" % (_rbcrevs, start)
1269 1291 )
1270 1292 f.seek(start)
1271 1293 if f.tell() != start:
1272 1294 start = 0
1273 1295 f.seek(start)
1274 1296 f.truncate()
1275 1297 end = revs * _rbcrecsize
1276 1298 f.write(self._rbcrevs.slice(start, end))
1277 1299 self._rbcrevslen = revs