##// END OF EJS Templates
branchmap: implement __contains__()...
Pulkit Goyal -
r42282:f0def07f default
parent child Browse files
Show More
@@ -1,634 +1,637 b''
1 1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11
12 12 from .node import (
13 13 bin,
14 14 hex,
15 15 nullid,
16 16 nullrev,
17 17 )
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 pycompat,
22 22 scmutil,
23 23 util,
24 24 )
25 25 from .utils import (
26 26 stringutil,
27 27 )
28 28
29 29 calcsize = struct.calcsize
30 30 pack_into = struct.pack_into
31 31 unpack_from = struct.unpack_from
32 32
33 33
### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# The ordering may be partial.
# Maps a filter name to the name of its nearest subset filter; None is the
# unfiltered view, which falls back on 'visible'.
subsettable = {None: 'visible',
               'visible-hidden': 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}
45 45
46 46
class BranchMapCache(object):
    """mapping of filtered views of repo with their branchcache

    Keyed by the repo's filter name (e.g. 'visible', 'served'); indexing
    with a repo object refreshes the entry for that repo's filter level
    before returning it.
    """
    def __init__(self):
        # filter name -> branchcache instance
        self._per_filter = {}

    def __getitem__(self, repo):
        # always revalidate/refresh before handing out the cache
        self.updatecache(repo)
        return self._per_filter[repo.filtername]

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # This can trigger updates for the caches for subsets of the filtered
        # view, e.g. when there is no cache for this filtered view or the cache
        # is stale.

        cl = repo.changelog
        filtername = repo.filtername
        bcache = self._per_filter.get(filtername)
        if bcache is None or not bcache.validfor(repo):
            # cache object missing or cache object stale? Read from disk
            bcache = branchcache.fromfile(repo)

        revs = []
        if bcache is None:
            # no (fresh) cache available anymore, perhaps we can re-use
            # the cache for a subset, then extend that to add info on missing
            # revisions.
            subsetname = subsettable.get(filtername)
            if subsetname is not None:
                subset = repo.filtered(subsetname)
                # self[subset] recurses into updatecache() for the subset view
                bcache = self[subset].copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
            else:
                # nothing to fall back on, start empty.
                bcache = branchcache()

        # extend the (possibly subset-derived) cache with all revs above its tip
        revs.extend(cl.revs(start=bcache.tiprev + 1))
        if revs:
            bcache.update(repo, revs)

        assert bcache.validfor(repo), filtername
        self._per_filter[repo.filtername] = bcache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = []
        for bheads in remotebranchmap.itervalues():
            rbheads += bheads
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                if c:
                    # remember heads that close their branch
                    closed.append(h)

        if rbheads:
            # tip of the cache is the highest local rev among remote heads
            rtiprev = max((int(clrev(node)) for node in rbheads))
            cache = branchcache(
                remotebranchmap, repo[rtiprev].node(), rtiprev,
                closednodes=closed)

            # Try to stick it as low as possible
            # filter above served are unlikely to be fetch from a clone
            for candidate in ('base', 'immutable', 'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    self._per_filter[candidate] = cache
                    cache.write(rview)
                    return

    def clear(self):
        # drop every cached filtered view
        self._per_filter.clear()
128 128
129 129
class branchcache(object):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None, hasnode=None):
        """ hasnode is a function which can be used to verify whether changelog
        has a given node or not. If it's not provided, we assume that every node
        we have exists in changelog """
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        # branch name -> list of head nodes
        self._entries = dict(entries)
        # whether closed nodes are verified or not
        self._closedverified = False
        # branches for which nodes are verified
        self._verifiedbranches = set()
        self._hasnode = hasnode
        if self._hasnode is None:
            self._hasnode = lambda x: True

    def __iter__(self):
        # iterate over branch names, dict-style
        return iter(self._entries)

    def __setitem__(self, key, value):
        self._entries[key] = value

    def __getitem__(self, key):
        return self._entries[key]

    def __contains__(self, key):
        """True if a branch of this name is present in the cache"""
        return key in self._entries

    def iteritems(self):
        """iterate over (branch name, head nodes) pairs"""
        return self._entries.iteritems()

    def hasbranch(self, label):
        """ checks whether a branch of this name exists or not """
        return label in self._entries

    @classmethod
    def fromfile(cls, repo):
        """Read the on-disk branch cache for repo.

        Returns a branchcache instance, or None if the cache file is
        missing, unreadable, or stale/corrupt for this repo view.
        """
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            # first line: "<tip hex> <tip rev> [filtered hash]"
            cachekey = next(lineiter).rstrip('\n').split(" ", 2)
            last, lrev = cachekey[:2]
            last, lrev = bin(last), int(lrev)
            filteredhash = None
            hasnode = repo.changelog.hasnode
            if len(cachekey) > 2:
                filteredhash = bin(cachekey[2])
            bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash,
                         hasnode=hasnode)
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError(r'tip differs')
            bcache.load(repo, lineiter)
        except (IOError, OSError):
            return None

        except Exception as inst:
            # any parse/validation error just invalidates the cache
            if repo.ui.debugflag:
                msg = 'invalid branchheads cache'
                if repo.filtername is not None:
                    msg += ' (%s)' % repo.filtername
                msg += ': %s\n'
                repo.ui.debug(msg % pycompat.bytestr(inst))
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    def load(self, repo, lineiter):
        """ fully loads the branchcache by reading from the file using the line
        iterator passed"""
        cl = repo.changelog
        for line in lineiter:
            line = line.rstrip('\n')
            if not line:
                continue
            # each line: "<head hex node> <o|c> <branch name>"
            node, state, label = line.split(" ", 2)
            if state not in 'oc':
                raise ValueError(r'invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            if not cl.hasnode(node):
                raise ValueError(
                    r'node %s does not exist' % pycompat.sysstr(hex(node)))
            self._entries.setdefault(label, []).append(node)
            self._verifiedbranches.add(label)
            if state == 'c':
                self._closednodes.add(node)
        # every line was checked against the changelog above
        self._closedverified = True

    @staticmethod
    def _filename(repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = "branch2"
        if repo.filtername:
            filename = '%s-%s' % (filename, repo.filtername)
        return filename

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash ==
                         scmutil.filteredhash(repo, self.tiprev)))
        except IndexError:
            # tiprev no longer exists (e.g. after a strip)
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        """filter nodes down to those that do not close their branch"""
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        """return the heads of branch; include closed heads only if closed
        is true. Raises KeyError for unknown branch."""
        heads = self._entries[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        """yield (branchname, heads, tipnode, isclosed) tuples"""
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """ returns all the heads """
        return self._entries.itervalues()

    def copy(self):
        """return an deep copy of the branchcache object"""
        return type(self)(
            self._entries, self.tipnode, self.tiprev, self.filteredhash,
            self._closednodes)

    def write(self, repo):
        """Serialize the cache to disk (best effort; failures are logged
        and otherwise ignored so a read-only repo still works)."""
        try:
            f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self.iteritems()):
                label = encoding.fromlocal(label)
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state, label))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self._entries), nodecount)
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug("couldn't write branch cache: %s\n" %
                          stringutil.forcebytestr(inst))

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self._entries.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This have been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            # recompute the tip from the surviving heads
            for heads in self.iterheads():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername or b'None', duration)

        # persist the refreshed cache immediately
        self.write(repo)
400 403
401 404
class remotebranchcache(branchcache):
    """Branchmap info for a remote connection, should not write locally"""

    def write(self, repo):
        # Deliberately a no-op: branch data learned from a remote peer must
        # never overwrite the local on-disk branch cache.
        return None
406 409
407 410
# Revision branch info cache

# version suffix for the cache file names; bumping it invalidates old files
_rbcversion = '-v1'
_rbcnames = 'rbc-names' + _rbcversion
_rbcrevs = 'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with the top bit indicating
# a branch-closing commit] (see _rbccloseflag; set when 'close' is true in
# _branchinfo/setdata below)
_rbcrecfmt = '>4sI'  # big-endian: 4-byte node prefix + unsigned 32-bit int
_rbcrecsize = calcsize(_rbcrecfmt)
_rbcnodelen = 4
_rbcbranchidxmask = 0x7fffffff  # low 31 bits: index into the rbc-names list
_rbccloseflag = 0x80000000  # high bit: this revision closes its branch
419 422
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs file will thus still give the right
    result while converging towards full recovery ... assuming no incorrectly
    matching node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        assert repo.filtername is None
        self._repo = repo
        self._names = [] # branch names in local encoding with static index
        self._rbcrevs = bytearray() # raw rbc-revs record data
        self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata) # for verification before writing
            if bndata:
                self._names = [encoding.tolocal(bn)
                               for bn in bndata.split('\0')]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            # only load rbc-revs when rbc-names was usable: records index
            # into the names list
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              stringutil.forcebytestr(inst))
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names) # number of names read at
                                               # _rbcsnameslen

    def _clear(self):
        # reset all in-memory state and start over from scratch
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
        util.clearcachedproperty(self, '_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        # branch name -> index into self._names, built lazily
        return dict((b, r) for r, b in enumerate(self._names))

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == '\0\0\0\0':
            # record never written: fall through to the slow path below
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug("referenced branch names not found"
                    " - rebuilding revision branch cache from scratch\n")
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug("history modification detected - truncating "
                "revision branch cache to revision %d\n" % rev)
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first time we see this branch: append to the names table
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, branch, rev, node, close):
        """add new data information to the cache"""
        if branch in self._namesreverse:
            branchidx = self._namesreverse[branch]
        else:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, node, branchidx)
        # If no cache data were readable (non exists, bad permission, etc)
        # the cache was bypassing itself by setting:
        #
        #   self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if r'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            # grow the buffer with zero records up to the changelog length
            self._rbcrevs.extend('\0' *
                                 (len(self._repo.changelog) * _rbcrecsize -
                                  len(self._rbcrevs)))
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        # mark everything from this rev on as dirty (needing a write)
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            # flush to disk when the current transaction finalizes
            tr.addfinalize('write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = ''  # used only to label the failing phase in the debug message
        try:
            if self._rbcnamescount < len(self._names):
                # new branch names were added: append (or rewrite) rbc-names
                step = ' names'
                wlock = repo.wlock(wait=False)
                if self._rbcnamescount != 0:
                    f = repo.cachevfs.open(_rbcnames, 'ab')
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        # on-disk file diverged from what we read: rewrite it
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    # before rewriting names, make sure references are removed
                    repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                    f = repo.cachevfs.open(_rbcnames, 'wb')
                f.write('\0'.join(encoding.fromlocal(b)
                                  for b in self._names[self._rbcnamescount:]))
                self._rbcsnameslen = f.tell()
                f.close()
                self._rbcnamescount = len(self._names)

            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                # dirty revision records: append from the first dirty one
                step = ''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                revs = min(len(repo.changelog),
                           len(self._rbcrevs) // _rbcrecsize)
                f = repo.cachevfs.open(_rbcrevs, 'ab')
                if f.tell() != start:
                    repo.ui.debug("truncating cache/%s to %d\n"
                                  % (_rbcrevs, start))
                    f.seek(start)
                    if f.tell() != start:
                        # seek failed to land where expected: rewrite fully
                        start = 0
                        f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
                self._rbcrevslen = revs
        except (IOError, OSError, error.Abort, error.LockError) as inst:
            # best effort: a read-only or locked repo just skips the write
            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                          % (step, stringutil.forcebytestr(inst)))
        finally:
            if wlock is not None:
                wlock.release()
General Comments 0
You need to be logged in to leave comments. Login now