##// END OF EJS Templates
branchcache: fix the docstring of _verifybranch()...
Pulkit Goyal -
r42301:d9dc0896 default
parent child Browse files
Show More
@@ -1,668 +1,667 b''
1 1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11
12 12 from .node import (
13 13 bin,
14 14 hex,
15 15 nullid,
16 16 nullrev,
17 17 )
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 pycompat,
22 22 scmutil,
23 23 util,
24 24 )
25 25 from .utils import (
26 26 stringutil,
27 27 )
28 28
29 29 calcsize = struct.calcsize
30 30 pack_into = struct.pack_into
31 31 unpack_from = struct.unpack_from
32 32
33 33
### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# the ordering may be partial
# Maps a filter name to the name of its nearest subset filter. The ``None``
# key stands for the unfiltered repository view.
subsettable = {None: 'visible',
               'visible-hidden': 'visible',
               'visible': 'served',
               'served.hidden': 'served',
               'served': 'immutable',
               'immutable': 'base'}
46 46
47 47
class BranchMapCache(object):
    """mapping of filtered views of repo with their branchcache"""
    def __init__(self):
        # one branchcache instance per filter name (keys mirror subsettable)
        self._per_filter = {}

    def __getitem__(self, repo):
        """Return an up-to-date branchcache for ``repo``'s filtered view."""
        self.updatecache(repo)
        return self._per_filter[repo.filtername]

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # This can trigger updates for the caches for subsets of the filtered
        # view, e.g. when there is no cache for this filtered view or the cache
        # is stale.

        cl = repo.changelog
        filtername = repo.filtername
        bcache = self._per_filter.get(filtername)
        if bcache is None or not bcache.validfor(repo):
            # cache object missing or cache object stale? Read from disk
            bcache = branchcache.fromfile(repo)

        revs = []
        if bcache is None:
            # no (fresh) cache available anymore, perhaps we can re-use
            # the cache for a subset, then extend that to add info on missing
            # revisions.
            subsetname = subsettable.get(filtername)
            if subsetname is not None:
                # recursive lookup: self[subset] updates the subset's cache
                subset = repo.filtered(subsetname)
                bcache = self[subset].copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
            else:
                # nothing to fall back on, start empty.
                bcache = branchcache()

        revs.extend(cl.revs(start=bcache.tiprev + 1))
        if revs:
            bcache.update(repo, revs)

        assert bcache.validfor(repo), filtername
        self._per_filter[repo.filtername] = bcache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = []
        for bheads in remotebranchmap.itervalues():
            rbheads += bheads
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                if c:
                    closed.append(h)

        if rbheads:
            rtiprev = max((int(clrev(node)) for node in rbheads))
            cache = branchcache(
                remotebranchmap, repo[rtiprev].node(), rtiprev,
                closednodes=closed)

            # Try to stick it as low as possible
            # filter above served are unlikely to be fetch from a clone
            for candidate in ('base', 'immutable', 'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    self._per_filter[candidate] = cache
                    cache.write(rview)
                    return

    def clear(self):
        """Drop every cached branchcache instance."""
        self._per_filter.clear()
129 129
def _unknownnode(node):
    """Raise ValueError for a node the branchcache references but the
    changelog does not contain."""
    nodestr = pycompat.sysstr(hex(node))
    raise ValueError(r'node %s does not exist' % nodestr)
134 134
class branchcache(object):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None, hasnode=None):
        """ hasnode is a function which can be used to verify whether changelog
        has a given node or not. If it's not provided, we assume that every node
        we have exists in changelog """
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        self._entries = dict(entries)
        # whether closed nodes are verified or not
        self._closedverified = False
        # branches for which nodes are verified
        self._verifiedbranches = set()
        self._hasnode = hasnode
        if self._hasnode is None:
            # no verifier provided: assume every cached node exists
            self._hasnode = lambda x: True

    def _verifyclosed(self):
        """ verify the closed nodes we have """
        if self._closedverified:
            return
        for node in self._closednodes:
            if not self._hasnode(node):
                _unknownnode(node)

        self._closedverified = True

    def _verifybranch(self, branch):
        """ verify head nodes for the given branch. """
        # already verified (or unknown) branches are skipped, so verification
        # happens lazily and at most once per branch
        if branch not in self._entries or branch in self._verifiedbranches:
            return
        for n in self._entries[branch]:
            if not self._hasnode(n):
                _unknownnode(n)

        self._verifiedbranches.add(branch)

    def _verifyall(self):
        """ verifies nodes of all the branches """
        for b in self._entries:
            self._verifybranch(b)

    def __iter__(self):
        """Iterate over the branch names (no node verification)."""
        return iter(self._entries)

    def __setitem__(self, key, value):
        """Set the head list for branch ``key``."""
        self._entries[key] = value

    def __getitem__(self, key):
        """Return the (verified) head list for branch ``key``."""
        self._verifybranch(key)
        return self._entries[key]

    def __contains__(self, key):
        self._verifybranch(key)
        return key in self._entries

    def iteritems(self):
        """Iterate over (branch, heads) pairs, verifying all branches first."""
        self._verifyall()
        return self._entries.iteritems()

    def hasbranch(self, label):
        """ checks whether a branch of this name exists or not """
        self._verifybranch(label)
        return label in self._entries

    @classmethod
    def fromfile(cls, repo):
        """Read the branch cache for ``repo`` from disk.

        Returns None when no cache file exists; returns a cache object with
        no entries (load failed) or full entries otherwise.
        """
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            # first line is the cache key: tipnode, tiprev, optional hash
            cachekey = next(lineiter).rstrip('\n').split(" ", 2)
            last, lrev = cachekey[:2]
            last, lrev = bin(last), int(lrev)
            filteredhash = None
            hasnode = repo.changelog.hasnode
            if len(cachekey) > 2:
                filteredhash = bin(cachekey[2])
            bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash,
                         hasnode=hasnode)
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError(r'tip differs')
            bcache.load(repo, lineiter)
        except (IOError, OSError):
            # missing/unreadable file: caller falls back to recomputation
            return None

        except Exception as inst:
            if repo.ui.debugflag:
                msg = 'invalid branchheads cache'
                if repo.filtername is not None:
                    msg += ' (%s)' % repo.filtername
                msg += ': %s\n'
                repo.ui.debug(msg % pycompat.bytestr(inst))
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    def load(self, repo, lineiter):
        """ fully loads the branchcache by reading from the file using the line
        iterator passed"""
        for line in lineiter:
            line = line.rstrip('\n')
            if not line:
                continue
            # each line: <hex node> <'o'|'c'> <branch name>
            node, state, label = line.split(" ", 2)
            if state not in 'oc':
                raise ValueError(r'invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            self._entries.setdefault(label, []).append(node)
            if state == 'c':
                self._closednodes.add(node)

    @staticmethod
    def _filename(repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = "branch2"
        if repo.filtername:
            filename = '%s-%s' % (filename, repo.filtername)
        return filename

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash ==
                         scmutil.filteredhash(repo, self.tiprev)))
        except IndexError:
            # tiprev no longer exists (e.g. after a strip)
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        """Filter ``nodes`` down to those that do not close their branch."""
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        """Return the heads of ``branch``; include closed heads if ``closed``."""
        self._verifybranch(branch)
        heads = self._entries[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        """Yield (branch, heads, tipnode, isclosed) for every branch."""
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """ returns all the heads """
        self._verifyall()
        return self._entries.itervalues()

    def copy(self):
        """return a copy of the branchcache object

        NOTE(review): the entries mapping is copied (via ``dict(entries)`` in
        __init__) but the per-branch head lists and the closed-nodes set are
        shared with the original, so this is not a full deep copy."""
        return type(self)(
            self._entries, self.tipnode, self.tiprev, self.filteredhash,
            self._closednodes)

    def write(self, repo):
        """Serialize the cache to disk; best effort, errors are logged."""
        try:
            f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self.iteritems()):
                label = encoding.fromlocal(label)
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state, label))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self._entries), nodecount)
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug("couldn't write branch cache: %s\n" %
                          stringutil.forcebytestr(inst))

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self._entries.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This have been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            # recompute the tip from the surviving heads
            for heads in self.iterheads():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername or b'None', duration)

        self.write(repo)
434 433
435 434
class remotebranchcache(branchcache):
    """Branchmap info for a remote connection, should not write locally"""
    def write(self, repo):
        # deliberate no-op: data derived from a remote must never overwrite
        # the local on-disk branch cache
        pass
440 439
441 440
# Revision branch info cache

_rbcversion = '-v1'
_rbcnames = 'rbc-names' + _rbcversion
_rbcrevs = 'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
_rbcrecfmt = '>4sI'
# size in bytes of one rbc-revs record (per the struct format above)
_rbcrecsize = calcsize(_rbcrecfmt)
# number of node-hash bytes stored per record
_rbcnodelen = 4
# low 31 bits of the second field: index into the branch-name list
_rbcbranchidxmask = 0x7fffffff
# high bit of the second field: set when the revision closes its branch
_rbccloseflag = 0x80000000
453 452
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs file thus still give the right result
    while converging towards full recovery ... assuming no incorrectly matching
    node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        assert repo.filtername is None
        self._repo = repo
        self._names = [] # branch names in local encoding with static index
        self._rbcrevs = bytearray()
        self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata) # for verification before writing
            if bndata:
                self._names = [encoding.tolocal(bn)
                               for bn in bndata.split('\0')]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              stringutil.forcebytestr(inst))
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            # no usable records: drop the names too so both files stay in sync
            self._names = []
        self._rbcnamescount = len(self._names) # number of names read at
                                               # _rbcsnameslen

    def _clear(self):
        """Reset the in-memory cache to a pristine (all-zero records) state."""
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
        util.clearcachedproperty(self, '_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        # lazy reverse mapping: branch name -> index in self._names
        return dict((b, r) for r, b in enumerate(self._names))

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == '\0\0\0\0':
            # record never written: fall through to the slow path
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug("referenced branch names not found"
                    " - rebuilding revision branch cache from scratch\n")
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug("history modification detected - truncating "
                "revision branch cache to revision %d\n" % rev)
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first time this branch name is seen: assign the next index
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, branch, rev, node, close):
        """add new data information to the cache"""
        if branch in self._namesreverse:
            branchidx = self._namesreverse[branch]
        else:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, node, branchidx)
        # If no cache data were readable (non exists, bad permission, etc)
        # the cache was bypassing itself by setting:
        #
        # self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if r'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            # grow the buffer with zeroed records up to the changelog length
            self._rbcrevs.extend('\0' *
                                 (len(self._repo.changelog) * _rbcrecsize -
                                  len(self._rbcrevs)))
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        # mark everything from this rev on as dirty (needing a write)
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize('write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = ''
        try:
            if self._rbcnamescount < len(self._names):
                # new branch names were added since the last write
                step = ' names'
                wlock = repo.wlock(wait=False)
                if self._rbcnamescount != 0:
                    f = repo.cachevfs.open(_rbcnames, 'ab')
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        # on-disk file diverged from what we read: rewrite all
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    # before rewriting names, make sure references are removed
                    repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                    f = repo.cachevfs.open(_rbcnames, 'wb')
                f.write('\0'.join(encoding.fromlocal(b)
                                  for b in self._names[self._rbcnamescount:]))
                self._rbcsnameslen = f.tell()
                f.close()
                self._rbcnamescount = len(self._names)

            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = ''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                revs = min(len(repo.changelog),
                           len(self._rbcrevs) // _rbcrecsize)
                f = repo.cachevfs.open(_rbcrevs, 'ab')
                if f.tell() != start:
                    repo.ui.debug("truncating cache/%s to %d\n"
                                  % (_rbcrevs, start))
                    f.seek(start)
                    if f.tell() != start:
                        # seek failed to land where expected: rewrite from 0
                        start = 0
                        f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
                self._rbcrevslen = revs
        except (IOError, OSError, error.Abort, error.LockError) as inst:
            # best effort: a read-only repo or lock contention is not fatal
            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                          % (step, stringutil.forcebytestr(inst)))
        finally:
            if wlock is not None:
                wlock.release()
General Comments 0
You need to be logged in to leave comments. Login now