##// END OF EJS Templates
branchcache: have a hasnode function to validate nodes...
Pulkit Goyal -
r42174:b5511845 default
parent child Browse files
Show More
@@ -1,626 +1,634 b''
1 1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11
12 12 from .node import (
13 13 bin,
14 14 hex,
15 15 nullid,
16 16 nullrev,
17 17 )
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 pycompat,
22 22 scmutil,
23 23 util,
24 24 )
25 25 from .utils import (
26 26 stringutil,
27 27 )
28 28
# Local aliases for the struct helpers used by the revision branch cache.
calcsize = struct.calcsize
pack_into = struct.pack_into
unpack_from = struct.unpack_from


### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# The ordering may be partial.
subsettable = {
    None: 'visible',
    'visible-hidden': 'visible',
    'visible': 'served',
    'served': 'immutable',
    'immutable': 'base',
}
45 45
46 46
class BranchMapCache(object):
    """mapping of filtered views of repo with their branchcache"""

    def __init__(self):
        self._per_filter = {}

    def __getitem__(self, repo):
        self.updatecache(repo)
        return self._per_filter[repo.filtername]

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # Updating a cache may require updating the caches for subsets of
        # the filtered view first, e.g. when there is no cache for this
        # filtered view yet or the one we have is stale.
        cl = repo.changelog
        filtername = repo.filtername
        cache = self._per_filter.get(filtername)
        if cache is None or not cache.validfor(repo):
            # in-memory cache missing or stale? try the on-disk one
            cache = branchcache.fromfile(repo)

        missingrevs = []
        if cache is None:
            # No (fresh) cache available anymore; perhaps we can reuse the
            # cache of a nearby subset and extend it with info on missing
            # revisions.
            subsetname = subsettable.get(filtername)
            if subsetname is None:
                # nothing to fall back on, start empty.
                cache = branchcache()
            else:
                subset = repo.filtered(subsetname)
                cache = self[subset].copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                missingrevs.extend(r for r in extrarevs
                                   if r <= cache.tiprev)

        missingrevs.extend(cl.revs(start=cache.tiprev + 1))
        if missingrevs:
            cache.update(repo, missingrevs)

        assert cache.validfor(repo), filtername
        self._per_filter[repo.filtername] = cache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = []
        for bheads in remotebranchmap.itervalues():
            rbheads.extend(bheads)
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                if c:
                    closed.append(h)

        if rbheads:
            rtiprev = max(int(clrev(node)) for node in rbheads)
            cache = branchcache(
                remotebranchmap, repo[rtiprev].node(), rtiprev,
                closednodes=closed)

            # Try to stick it as low as possible
            # filter above served are unlikely to be fetch from a clone
            for candidate in ('base', 'immutable', 'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    self._per_filter[candidate] = cache
                    cache.write(rview)
                    return

    def clear(self):
        self._per_filter.clear()
128 128
129 129
class branchcache(object):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None, hasnode=None):
        """ hasnode is a function which can be used to verify whether changelog
        has a given node or not. If it's not provided, we assume that every node
        we have exists in changelog """
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        self._entries = dict(entries)
        # whether closed nodes are verified or not
        self._closedverified = False
        # branches for which nodes are verified
        self._verifiedbranches = set()
        self._hasnode = hasnode
        if self._hasnode is None:
            self._hasnode = lambda x: True

    def __iter__(self):
        return iter(self._entries)

    def __setitem__(self, key, value):
        self._entries[key] = value

    def __getitem__(self, key):
        return self._entries[key]

    def iteritems(self):
        return self._entries.iteritems()

    def hasbranch(self, label):
        """ checks whether a branch of this name exists or not """
        return label in self._entries

    @classmethod
    def fromfile(cls, repo):
        """Build a branchcache from the on-disk cache file of repo.

        Returns None if no cache file exists or its content is invalid
        (a debug message is emitted in the latter case)."""
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            cachekey = next(lineiter).rstrip('\n').split(" ", 2)
            last, lrev = cachekey[:2]
            last, lrev = bin(last), int(lrev)
            filteredhash = None
            hasnode = repo.changelog.hasnode
            if len(cachekey) > 2:
                filteredhash = bin(cachekey[2])
            bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash,
                         hasnode=hasnode)
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError(r'tip differs')
            bcache.load(repo, lineiter)
        except (IOError, OSError):
            return None

        except Exception as inst:
            if repo.ui.debugflag:
                msg = 'invalid branchheads cache'
                if repo.filtername is not None:
                    msg += ' (%s)' % repo.filtername
                msg += ': %s\n'
                repo.ui.debug(msg % pycompat.bytestr(inst))
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    def load(self, repo, lineiter):
        """ fully loads the branchcache by reading from the file using the line
        iterator passed"""
        for line in lineiter:
            line = line.rstrip('\n')
            if not line:
                continue
            node, state, label = line.split(" ", 2)
            if state not in 'oc':
                raise ValueError(r'invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            # validate through the injected hasnode (set to
            # repo.changelog.hasnode by fromfile) rather than reaching into
            # repo.changelog directly, so callers control node validation
            if not self._hasnode(node):
                raise ValueError(
                    r'node %s does not exist' % pycompat.sysstr(hex(node)))
            self._entries.setdefault(label, []).append(node)
            self._verifiedbranches.add(label)
            if state == 'c':
                self._closednodes.add(node)
        self._closedverified = True

    @staticmethod
    def _filename(repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = "branch2"
        if repo.filtername:
            filename = '%s-%s' % (filename, repo.filtername)
        return filename

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash ==
                         scmutil.filteredhash(repo, self.tiprev)))
        except IndexError:
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        # lazily filter out heads that close their branch
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        """Return the heads of branch; closed heads are skipped unless
        closed=True. Raises KeyError for unknown branch."""
        heads = self[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        # yields (branchname, heads, tipnode, isclosed) tuples
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """ returns all the heads """
        return self._entries.itervalues()

    def copy(self):
        """return a deep copy of the branchcache object

        The copy gets its own _closednodes set (previously the set was
        shared, so updating the copy silently mutated its source), keeps
        the hasnode validator (previously dropped) and preserves the
        concrete subclass (previously a remotebranchcache copied into a
        plain, locally-writing branchcache). Head lists themselves are
        still shared; update() rebinds rather than mutates them."""
        return type(self)(
            self._entries, self.tipnode, self.tiprev, self.filteredhash,
            set(self._closednodes), hasnode=self._hasnode)

    def write(self, repo):
        """Persist the cache to disk. Failures are logged, never raised."""
        try:
            f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self.iteritems()):
                label = encoding.fromlocal(label)
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state, label))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self._entries), nodecount)
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug("couldn't write branch cache: %s\n" %
                          stringutil.forcebytestr(inst))

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self._entries.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This have been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.iterheads():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername or b'None', duration)

        self.write(repo)
392 400
393 401
class remotebranchcache(branchcache):
    """Branchmap info for a remote connection, should not write locally"""

    def write(self, repo):
        """Disabled: a remote branchmap must never touch the local cache."""
        pass
398 406
399 407
400 408 # Revision branch info cache
401 409
402 410 _rbcversion = '-v1'
403 411 _rbcnames = 'rbc-names' + _rbcversion
404 412 _rbcrevs = 'rbc-revs' + _rbcversion
405 413 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
406 414 _rbcrecfmt = '>4sI'
407 415 _rbcrecsize = calcsize(_rbcrecfmt)
408 416 _rbcnodelen = 4
409 417 _rbcbranchidxmask = 0x7fffffff
410 418 _rbccloseflag = 0x80000000
411 419
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs file thus still gives the right result
    while converging towards full recovery ... assuming no incorrectly matching
    node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        assert repo.filtername is None
        self._repo = repo
        # branch names in local encoding; list position is the stable index
        self._names = []
        self._rbcrevs = bytearray()
        # length of the names data read from disk, checked before appending
        self._rbcsnameslen = 0
        try:
            namesdata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(namesdata)
            if namesdata:
                self._names = [encoding.tolocal(name)
                               for name in namesdata.split('\0')]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                revsdata = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = revsdata
            except (IOError, OSError) as err:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              stringutil.forcebytestr(err))
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            self._names = []
        # how many entries of self._names are already present on disk
        self._rbcnamescount = len(self._names)

    def _clear(self):
        """Drop all cached state and start over with a zeroed record buffer."""
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
        util.clearcachedproperty(self, '_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        # lazily built reverse mapping: branch name -> stable index
        return dict((b, r) for r, b in enumerate(self._names))

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        cl = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return cl.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        wantednode = cl.node(rev)[:_rbcnodelen]
        cachednode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachednode == '\0\0\0\0':
            # record never written: fall through to the slow path
            pass
        elif cachednode == wantednode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug("referenced branch names not found"
                    " - rebuilding revision branch cache from scratch\n")
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug("history modification detected - truncating "
                "revision branch cache to revision %d\n" % rev)
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        cl = self._repo.changelog
        b, close = cl.branchinfo(rev)
        try:
            branchidx = self._namesreverse[b]
        except KeyError:
            # first time we see this branch: assign the next free index
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = cl.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, branch, rev, node, close):
        """add new data information to the cache"""
        try:
            branchidx = self._namesreverse[branch]
        except KeyError:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, node, branchidx)
        # If no cache data were readable (non exists, bad permission, etc)
        # the cache was bypassing itself by setting:
        #
        #   self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if r'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            # grow the buffer to hold one record per changelog revision
            self._rbcrevs.extend('\0' *
                                 (len(self._repo.changelog) * _rbcrecsize -
                                  len(self._rbcrevs)))
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        # everything from rev on is now dirty and must be (re)written
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize('write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = ''
        try:
            # flush any branch names appended since the last write
            if self._rbcnamescount < len(self._names):
                step = ' names'
                wlock = repo.wlock(wait=False)
                if self._rbcnamescount != 0:
                    f = repo.cachevfs.open(_rbcnames, 'ab')
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        # on-disk file changed underneath us: rewrite fully
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    # before rewriting names, make sure references are removed
                    repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                    f = repo.cachevfs.open(_rbcnames, 'wb')
                    f.write('\0'.join(encoding.fromlocal(b)
                                      for b in
                                      self._names[self._rbcnamescount:]))
                    self._rbcsnameslen = f.tell()
                    f.close()
                self._rbcnamescount = len(self._names)

            # then append (or truncate and rewrite) the revision records
            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = ''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                revs = min(len(repo.changelog),
                           len(self._rbcrevs) // _rbcrecsize)
                f = repo.cachevfs.open(_rbcrevs, 'ab')
                if f.tell() != start:
                    repo.ui.debug("truncating cache/%s to %d\n"
                                  % (_rbcrevs, start))
                    f.seek(start)
                    if f.tell() != start:
                        start = 0
                        f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
                self._rbcrevslen = revs
        except (IOError, OSError, error.Abort, error.LockError) as err:
            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                          % (step, stringutil.forcebytestr(err)))
        finally:
            if wlock is not None:
                wlock.release()
General Comments 0
You need to be logged in to leave comments. Login now