##// END OF EJS Templates
branchmap: pytype is confused about bytestr...
Augie Fackler -
r43805:1a47fe4b default
parent child Browse files
Show More
@@ -1,732 +1,738
1 1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11
12 12 from .node import (
13 13 bin,
14 14 hex,
15 15 nullid,
16 16 nullrev,
17 17 )
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 pycompat,
22 22 scmutil,
23 23 util,
24 24 )
25 25 from .utils import (
26 26 repoviewutil,
27 27 stringutil,
28 28 )
29 29
30 30 subsettable = repoviewutil.subsettable
31 31
32 32 calcsize = struct.calcsize
33 33 pack_into = struct.pack_into
34 34 unpack_from = struct.unpack_from
35 35
36 36
class BranchMapCache(object):
    """mapping of filtered views of repo with their branchcache"""

    def __init__(self):
        # one branchcache per repo filter level, keyed by filter name
        self._per_filter = {}

    def __getitem__(self, repo):
        # always refresh before handing the cache out, so callers get a
        # cache that is valid for the current repo state
        self.updatecache(repo)
        return self._per_filter[repo.filtername]

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # This can trigger updates for the caches for subsets of the filtered
        # view, e.g. when there is no cache for this filtered view or the cache
        # is stale.

        cl = repo.changelog
        filtername = repo.filtername
        bcache = self._per_filter.get(filtername)
        if bcache is None or not bcache.validfor(repo):
            # cache object missing or cache object stale? Read from disk
            bcache = branchcache.fromfile(repo)

        revs = []
        if bcache is None:
            # no (fresh) cache available anymore, perhaps we can re-use
            # the cache for a subset, then extend that to add info on missing
            # revisions.
            subsetname = subsettable.get(filtername)
            if subsetname is not None:
                subset = repo.filtered(subsetname)
                # NOTE: self[subset] recurses into updatecache for the
                # subset view, so the subset cache is fresh before copying
                bcache = self[subset].copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
            else:
                # nothing to fall back on, start empty.
                bcache = branchcache()

        revs.extend(cl.revs(start=bcache.tiprev + 1))
        if revs:
            bcache.update(repo, revs)

        assert bcache.validfor(repo), filtername
        self._per_filter[repo.filtername] = bcache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = []
        for bheads in pycompat.itervalues(remotebranchmap):
            rbheads += bheads
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                if c:
                    closed.append(h)

        if rbheads:
            rtiprev = max((int(clrev(node)) for node in rbheads))
            cache = branchcache(
                remotebranchmap,
                repo[rtiprev].node(),
                rtiprev,
                closednodes=closed,
            )

            # Try to stick it as low as possible
            # filter above served are unlikely to be fetch from a clone
            for candidate in (b'base', b'immutable', b'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    self._per_filter[candidate] = cache
                    cache.write(rview)
                    return

    def clear(self):
        # drop all per-filter caches (e.g. after history-modifying operations)
        self._per_filter.clear()
122 122
123 123
def _unknownnode(node):
    """Raise ValueError for a node the branchcache holds but the changelog
    does not know about."""
    hexnode = pycompat.sysstr(hex(node))
    raise ValueError(r'node %s does not exist' % hexnode)
128 128
129 129
130 130 def _branchcachedesc(repo):
131 131 if repo.filtername is not None:
132 132 return b'branch cache (%s)' % repo.filtername
133 133 else:
134 134 return b'branch cache'
135 135
136 136
class branchcache(object):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(
        self,
        entries=(),
        tipnode=nullid,
        tiprev=nullrev,
        filteredhash=None,
        closednodes=None,
        hasnode=None,
    ):
        """ hasnode is a function which can be used to verify whether changelog
        has a given node or not. If it's not provided, we assume that every node
        we have exists in changelog """
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        self._entries = dict(entries)
        # whether closed nodes are verified or not
        self._closedverified = False
        # branches for which nodes are verified
        self._verifiedbranches = set()
        self._hasnode = hasnode
        if self._hasnode is None:
            # no verifier provided: trust every cached node
            self._hasnode = lambda x: True

    def _verifyclosed(self):
        """ verify the closed nodes we have """
        if self._closedverified:
            return
        for node in self._closednodes:
            if not self._hasnode(node):
                _unknownnode(node)

        self._closedverified = True

    def _verifybranch(self, branch):
        """ verify head nodes for the given branch. """
        # verification is lazy and memoized per branch, so repeated lookups
        # of the same branch only pay the cost once
        if branch not in self._entries or branch in self._verifiedbranches:
            return
        for n in self._entries[branch]:
            if not self._hasnode(n):
                _unknownnode(n)

        self._verifiedbranches.add(branch)

    def _verifyall(self):
        """ verifies nodes of all the branches """
        needverification = set(self._entries.keys()) - self._verifiedbranches
        for b in needverification:
            self._verifybranch(b)

    def __iter__(self):
        return iter(self._entries)

    def __setitem__(self, key, value):
        self._entries[key] = value

    def __getitem__(self, key):
        self._verifybranch(key)
        return self._entries[key]

    def __contains__(self, key):
        self._verifybranch(key)
        return key in self._entries

    def iteritems(self):
        # verify each branch lazily as it is yielded
        for k, v in pycompat.iteritems(self._entries):
            self._verifybranch(k)
            yield k, v

    items = iteritems

    def hasbranch(self, label):
        """ checks whether a branch of this name exists or not """
        self._verifybranch(label)
        return label in self._entries

    @classmethod
    def fromfile(cls, repo):
        """Build a branchcache from the on-disk cache file.

        Returns None when no usable cache exists (missing, unreadable or
        stale file).
        """
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            # first line is the cache key: tip node, tip rev, optional
            # filtered-revs hash (see class docstring)
            cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
            last, lrev = cachekey[:2]
            last, lrev = bin(last), int(lrev)
            filteredhash = None
            hasnode = repo.changelog.hasnode
            if len(cachekey) > 2:
                filteredhash = bin(cachekey[2])
            bcache = cls(
                tipnode=last,
                tiprev=lrev,
                filteredhash=filteredhash,
                hasnode=hasnode,
            )
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError(r'tip differs')
            bcache.load(repo, lineiter)
        except (IOError, OSError):
            return None

        except Exception as inst:
            # any parse failure means the cache file is corrupt or stale;
            # report it in debug mode and fall back to recomputation
            if repo.ui.debugflag:
                msg = b'invalid %s: %s\n'
                repo.ui.debug(
                    msg
                    % (
                        _branchcachedesc(repo),
                        pycompat.bytestr(
                            inst  # pytype: disable=wrong-arg-types
                        ),
                    )
                )
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    def load(self, repo, lineiter):
        """ fully loads the branchcache by reading from the file using the line
        iterator passed"""
        for line in lineiter:
            line = line.rstrip(b'\n')
            if not line:
                continue
            node, state, label = line.split(b" ", 2)
            if state not in b'oc':
                raise ValueError(r'invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            self._entries.setdefault(label, []).append(node)
            if state == b'c':
                self._closednodes.add(node)

    @staticmethod
    def _filename(repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = b"branch2"
        if repo.filtername:
            filename = b'%s-%s' % (filename, repo.filtername)
        return filename

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return (self.tipnode == repo.changelog.node(self.tiprev)) and (
                self.filteredhash == scmutil.filteredhash(repo, self.tiprev)
            )
        except IndexError:
            # tiprev no longer exists in the changelog (e.g. after a strip)
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        # filter out heads that close their branch
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        """Return the head nodes of *branch*; include closed heads only
        when *closed* is True. Raises KeyError for an unknown branch."""
        self._verifybranch(branch)
        heads = self._entries[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        # yields (branchname, heads, tipnode, closed) tuples
        for bn, heads in pycompat.iteritems(self):
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """ returns all the heads """
        self._verifyall()
        return pycompat.itervalues(self._entries)

    def copy(self):
        """return an deep copy of the branchcache object"""
        return type(self)(
            self._entries,
            self.tipnode,
            self.tiprev,
            self.filteredhash,
            self._closednodes,
        )

    def write(self, repo):
        """Serialize the cache to disk (see class docstring for format)."""
        try:
            # atomictemp so a crash mid-write never leaves a torn cache file
            f = repo.cachevfs(self._filename(repo), b"w", atomictemp=True)
            cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(b" ".join(cachekey) + b'\n')
            nodecount = 0
            for label, nodes in sorted(pycompat.iteritems(self._entries)):
                label = encoding.fromlocal(label)
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = b'c'
                    else:
                        state = b'o'
                    f.write(b"%s %s %s\n" % (hex(node), state, label))
            f.close()
            repo.ui.log(
                b'branchcache',
                b'wrote %s with %d labels and %d nodes\n',
                _branchcachedesc(repo),
                len(self._entries),
                nodecount,
            )
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug(
                b"couldn't write branch cache: %s\n"
                % stringutil.forcebytestr(inst)
            )

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # new tip revision which we found after iterating items from new
        # branches
        ntiprev = self.tiprev

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in pycompat.iteritems(newbranches):
            bheads = self._entries.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This have been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > ntiprev:
                ntiprev = tiprev

        if ntiprev > self.tiprev:
            self.tiprev = ntiprev
            self.tipnode = cl.node(ntiprev)

        if not self.validfor(repo):
            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.iterheads():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log(
            b'branchcache',
            b'updated %s in %.4f seconds\n',
            _branchcachedesc(repo),
            duration,
        )

        self.write(repo)
470 476
471 477
class remotebranchcache(branchcache):
    """Branchmap info for a remote connection, should not write locally"""

    def write(self, repo):
        # deliberate no-op: data describing a remote must never be
        # persisted into the local repository's cache files
        pass
477 483
478 484
# Revision branch info cache

_rbcversion = b'-v1'
# cache file holding '\0'-separated branch names (append-only)
_rbcnames = b'rbc-names' + _rbcversion
# cache file holding one fixed-size record per revision
_rbcrevs = b'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating a
# closed branch] -- big-endian, see _rbccloseflag below
_rbcrecfmt = b'>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
_rbcnodelen = 4
# low 31 bits of the record word index into the _rbcnames name list
_rbcbranchidxmask = 0x7FFFFFFF
# high bit set means the revision closes its branch
_rbccloseflag = 0x80000000
490 496
491 497
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs fill thus still give the right result
    while converging towards full recovery ... assuming no incorrectly matching
    node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        assert repo.filtername is None
        self._repo = repo
        self._names = []  # branch names in local encoding with static index
        self._rbcrevs = bytearray()
        self._rbcsnameslen = 0  # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata)  # for verification before writing
            if bndata:
                self._names = [
                    encoding.tolocal(bn) for bn in bndata.split(b'\0')
                ]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug(
                    b"couldn't read revision branch cache: %s\n"
                    % stringutil.forcebytestr(inst)
                )
        # remember number of good records on disk
        self._rbcrevslen = min(
            len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
        )
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names)  # number of names read at
        # _rbcsnameslen

    def _clear(self):
        # reset the in-memory state to "everything must be rewritten"
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
        util.clearcachedproperty(self, b'_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        # lazy reverse lookup: branch name -> index in self._names
        return dict((b, r) for r, b in enumerate(self._names))

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx
        )
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == b'\0\0\0\0':
            # all-zero node prefix marks an unfilled record slot
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug(
                    b"referenced branch names not found"
                    b" - rebuilding revision branch cache from scratch\n"
                )
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug(
                b"history modification detected - truncating "
                b"revision branch cache to revision %d\n" % rev
            )
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first time we see this branch name: assign the next index
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, branch, rev, node, close):
        """add new data information to the cache"""
        if branch in self._namesreverse:
            branchidx = self._namesreverse[branch]
        else:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, node, branchidx)
        # If no cache data were readable (non exists, bad permission, etc)
        # the cache was bypassing itself by setting:
        #
        #   self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if r'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            # grow the record buffer with zero bytes up to changelog length
            self._rbcrevs.extend(
                b'\0'
                * (len(self._repo.changelog) * _rbcrecsize - len(self._rbcrevs))
            )
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        # mark everything from this rev onwards as needing a (re)write
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize(b'write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = b''
        try:
            # write the new names
            if self._rbcnamescount < len(self._names):
                wlock = repo.wlock(wait=False)
                step = b' names'
                self._writenames(repo)

            # write the new revs
            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = b''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                self._writerevs(repo, start)

        except (IOError, OSError, error.Abort, error.LockError) as inst:
            # best-effort cache: failure to write is logged, never fatal
            repo.ui.debug(
                b"couldn't write revision branch cache%s: %s\n"
                % (step, stringutil.forcebytestr(inst))
            )
        finally:
            if wlock is not None:
                wlock.release()

    def _writenames(self, repo):
        """ write the new branch names to revbranchcache """
        if self._rbcnamescount != 0:
            f = repo.cachevfs.open(_rbcnames, b'ab')
            if f.tell() == self._rbcsnameslen:
                f.write(b'\0')
            else:
                # on-disk file diverged from what we read: rewrite from scratch
                f.close()
                repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
                self._rbcnamescount = 0
                self._rbcrevslen = 0
        if self._rbcnamescount == 0:
            # before rewriting names, make sure references are removed
            repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
            f = repo.cachevfs.open(_rbcnames, b'wb')
            f.write(
                b'\0'.join(
                    encoding.fromlocal(b)
                    for b in self._names[self._rbcnamescount :]
                )
            )
            self._rbcsnameslen = f.tell()
        f.close()
        self._rbcnamescount = len(self._names)

    def _writerevs(self, repo, start):
        """ write the new revs to revbranchcache """
        revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
        with repo.cachevfs.open(_rbcrevs, b'ab') as f:
            if f.tell() != start:
                # file is shorter/longer than expected: reposition, or
                # rewrite everything when seeking fails
                repo.ui.debug(
                    b"truncating cache/%s to %d\n" % (_rbcrevs, start)
                )
                f.seek(start)
                if f.tell() != start:
                    start = 0
                    f.seek(start)
                f.truncate()
            end = revs * _rbcrecsize
            f.write(self._rbcrevs[start:end])
        self._rbcrevslen = revs
General Comments 0
You need to be logged in to leave comments. Login now