##// END OF EJS Templates
branchcache: make entries a private attribute...
Pulkit Goyal -
r42172:b137a679 default
parent child Browse files
Show More
@@ -1,620 +1,620 b''
1 1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11
12 12 from .node import (
13 13 bin,
14 14 hex,
15 15 nullid,
16 16 nullrev,
17 17 )
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 pycompat,
22 22 scmutil,
23 23 util,
24 24 )
25 25 from .utils import (
26 26 stringutil,
27 27 )
28 28
# short aliases for the struct helpers used by the revbranchcache record
# packing/unpacking below
calcsize = struct.calcsize
pack_into = struct.pack_into
unpack_from = struct.unpack_from


### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# the ordering may be partial
subsettable = {None: 'visible',
               'visible-hidden': 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}
45 45
46 46
class BranchMapCache(object):
    """Mapping of filtered views of a repo to their branchcache objects."""

    def __init__(self):
        self._per_filter = {}

    def __getitem__(self, repo):
        # refresh (or build) before handing the cache out
        self.updatecache(repo)
        return self._per_filter[repo.filtername]

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # May recursively refresh caches for subset views, e.g. when there is
        # no cache yet for this filtered view or the stored one is stale.
        cl = repo.changelog
        filtername = repo.filtername
        cache = self._per_filter.get(filtername)
        if cache is None or not cache.validfor(repo):
            # in-memory object missing or stale -> try the on-disk copy
            cache = branchcache.fromfile(repo)

        missingrevs = []
        if cache is None:
            # No usable cache anywhere; seed from the nearest subset view
            # (when one exists) and remember the extra revisions that must be
            # folded in afterwards.
            subsetname = subsettable.get(filtername)
            if subsetname is None:
                # nothing to fall back on, start empty.
                cache = branchcache()
            else:
                subset = repo.filtered(subsetname)
                cache = self[subset].copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                missingrevs.extend(
                    r for r in extrarevs if r <= cache.tiprev)

        missingrevs.extend(cl.revs(start=cache.tiprev + 1))
        if missingrevs:
            cache.update(repo, missingrevs)

        assert cache.validfor(repo), filtername
        self._per_filter[repo.filtername] = cache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        torev = cl.rev
        branchinfo = cl.branchinfo
        allheads = []
        closed = []
        for heads in remotebranchmap.itervalues():
            allheads.extend(heads)
            for node in heads:
                branch, isclosed = branchinfo(torev(node))
                if isclosed:
                    closed.append(node)

        if allheads:
            rtiprev = max((int(torev(node)) for node in allheads))
            cache = branchcache(
                remotebranchmap, repo[rtiprev].node(), rtiprev,
                closednodes=closed)

            # Try to stick it as low as possible
            # filter above served are unlikely to be fetch from a clone
            for candidate in ('base', 'immutable', 'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    self._per_filter[candidate] = cache
                    cache.write(rview)
                    return

    def clear(self):
        """Drop every per-filter cache object held in memory."""
        self._per_filter.clear()
128 128
129 129
class branchcache(object):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None):
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        # mapping of branch name -> list of head nodes; private, all access
        # goes through the dict-like protocol methods below
        self._entries = dict(entries)

    def __iter__(self):
        return iter(self._entries)

    def __setitem__(self, key, value):
        self._entries[key] = value

    def __getitem__(self, key):
        return self._entries[key]

    def iteritems(self):
        return self._entries.iteritems()

    def hasbranch(self, label):
        """ checks whether a branch of this name exists or not """
        return label in self._entries

    @classmethod
    def fromfile(cls, repo):
        """Build a branchcache from the on-disk cache file of ``repo``.

        Returns None when the file is missing/unreadable or its content is
        invalid or stale for this repo view.
        """
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            cachekey = next(lineiter).rstrip('\n').split(" ", 2)
            last, lrev = cachekey[:2]
            last, lrev = bin(last), int(lrev)
            filteredhash = None
            if len(cachekey) > 2:
                filteredhash = bin(cachekey[2])
            bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash)
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError(r'tip differs')
            bcache.load(repo, lineiter)
        except (IOError, OSError):
            return None

        except Exception as inst:
            if repo.ui.debugflag:
                msg = 'invalid branchheads cache'
                if repo.filtername is not None:
                    msg += ' (%s)' % repo.filtername
                msg += ': %s\n'
                repo.ui.debug(msg % pycompat.bytestr(inst))
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    def load(self, repo, lineiter):
        """ fully loads the branchcache by reading from the file using the line
        iterator passed"""
        cl = repo.changelog
        for line in lineiter:
            line = line.rstrip('\n')
            if not line:
                continue
            node, state, label = line.split(" ", 2)
            if state not in 'oc':
                raise ValueError(r'invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            if not cl.hasnode(node):
                raise ValueError(
                    r'node %s does not exist' % pycompat.sysstr(hex(node)))
            self._entries.setdefault(label, []).append(node)
            if state == 'c':
                self._closednodes.add(node)

    @staticmethod
    def _filename(repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = "branch2"
        if repo.filtername:
            filename = '%s-%s' % (filename, repo.filtername)
        return filename

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash ==
                         scmutil.filteredhash(repo, self.tiprev)))
        except IndexError:
            # tiprev no longer exists in the changelog (e.g. after a strip)
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        """Filter ``nodes`` down to those that do not close their branch."""
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        """Return the heads of ``branch``; include closed heads only when
        ``closed`` is True."""
        heads = self[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        """Yield (branchname, heads, tipnode, isclosed) for every branch."""
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """ returns all the heads """
        return self._entries.itervalues()

    def copy(self):
        """return an deep copy of the branchcache object"""
        return branchcache(
            self._entries, self.tipnode, self.tiprev, self.filteredhash,
            self._closednodes)

    def write(self, repo):
        """Serialize the cache to disk; write errors are logged and swallowed
        so a read-only repo cannot break callers."""
        try:
            f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self.iteritems()):
                label = encoding.fromlocal(label)
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state, label))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self._entries), nodecount)
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug("couldn't write branch cache: %s\n" %
                          stringutil.forcebytestr(inst))

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self._entries.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This have been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.iterheads():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername or b'None', duration)

        self.write(repo)
386 386
387 387
class remotebranchcache(branchcache):
    """Branchmap info coming from a remote peer; local persistence disabled."""

    def write(self, repo):
        """No-op: data received from a remote must never be written locally."""
392 392
393 393
# Revision branch info cache

# version suffix baked into both cache file names; a format change means new
# file names and the old files are simply ignored
_rbcversion = '-v1'
_rbcnames = 'rbc-names' + _rbcversion
_rbcrevs = 'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
_rbcrecfmt = '>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
_rbcnodelen = 4
_rbcbranchidxmask = 0x7fffffff  # lower 31 bits: index into the names list
_rbccloseflag = 0x80000000  # high bit marks a branch-closing commit
405 405
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs fill thus still give the right result
    while converging towards full recovery ... assuming no incorrectly matching
    node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        # this cache is always built on the unfiltered repo
        assert repo.filtername is None
        self._repo = repo
        self._names = [] # branch names in local encoding with static index
        self._rbcrevs = bytearray()
        self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata) # for verification before writing
            if bndata:
                self._names = [encoding.tolocal(bn)
                               for bn in bndata.split('\0')]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              stringutil.forcebytestr(inst))
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names) # number of names read at
                                               # _rbcsnameslen

    def _clear(self):
        # reset all in-memory state; rbc-revs is re-zeroed at full changelog
        # size so records can be filled back in by index
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
        util.clearcachedproperty(self, '_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        # lazily-built inverse of self._names: branch name -> index
        return dict((b, r) for r, b in enumerate(self._names))

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == '\0\0\0\0':
            # record was never filled in; fall through to the slow path
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug("referenced branch names not found"
                    " - rebuilding revision branch cache from scratch\n")
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug("history modification detected - truncating "
                "revision branch cache to revision %d\n" % rev)
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first time this branch name is seen: append and index it
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, branch, rev, node, close):
        """add new data information to the cache"""
        if branch in self._namesreverse:
            branchidx = self._namesreverse[branch]
        else:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, node, branchidx)
        # If no cache data were readable (non exists, bad permission, etc)
        # the cache was bypassing itself by setting:
        #
        #   self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if r'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            # grow the byte array with zero records up to the changelog length
            self._rbcrevs.extend('\0' *
                                 (len(self._repo.changelog) * _rbcrecsize -
                                  len(self._rbcrevs)))
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        # anything at or above rev is no longer known-good on disk
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize('write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = ''
        try:
            if self._rbcnamescount < len(self._names):
                step = ' names'
                wlock = repo.wlock(wait=False)
                if self._rbcnamescount != 0:
                    # append-only: new names are added after a '\0' separator,
                    # but only if the file still ends where we last saw it
                    f = repo.cachevfs.open(_rbcnames, 'ab')
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    # before rewriting names, make sure references are removed
                    repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                    f = repo.cachevfs.open(_rbcnames, 'wb')
                f.write('\0'.join(encoding.fromlocal(b)
                                  for b in self._names[self._rbcnamescount:]))
                self._rbcsnameslen = f.tell()
                f.close()
                self._rbcnamescount = len(self._names)

            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = ''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                revs = min(len(repo.changelog),
                           len(self._rbcrevs) // _rbcrecsize)
                f = repo.cachevfs.open(_rbcrevs, 'ab')
                if f.tell() != start:
                    repo.ui.debug("truncating cache/%s to %d\n"
                                  % (_rbcrevs, start))
                    f.seek(start)
                    if f.tell() != start:
                        start = 0
                        f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
                self._rbcrevslen = revs
        except (IOError, OSError, error.Abort, error.LockError) as inst:
            # best-effort cache: failure to write is only worth a debug note
            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                          % (step, stringutil.forcebytestr(inst)))
        finally:
            if wlock is not None:
                wlock.release()
General Comments 0
You need to be logged in to leave comments. Login now