##// END OF EJS Templates
branchcache: don't verify closed nodes in _branchtip()...
Pulkit Goyal -
r42292:be5eeaf5 default
parent child Browse files
Show More
@@ -1,669 +1,668 b''
1 1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11
12 12 from .node import (
13 13 bin,
14 14 hex,
15 15 nullid,
16 16 nullrev,
17 17 )
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 pycompat,
22 22 scmutil,
23 23 util,
24 24 )
25 25 from .utils import (
26 26 stringutil,
27 27 )
28 28
29 29 calcsize = struct.calcsize
30 30 pack_into = struct.pack_into
31 31 unpack_from = struct.unpack_from
32 32
33 33
### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# The ordering may be partial.
subsettable = {None: 'visible',
               'visible-hidden': 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}
45 45
46 46
class BranchMapCache(object):
    """mapping of filtered views of repo with their branchcache"""
    def __init__(self):
        # maps repo.filtername -> branchcache instance for that filtered view
        self._per_filter = {}

    def __getitem__(self, repo):
        # Always refresh before handing a cache out so callers never see a
        # stale mapping for the repo's current filtered view.
        self.updatecache(repo)
        return self._per_filter[repo.filtername]

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # This can trigger updates for the caches for subsets of the filtered
        # view, e.g. when there is no cache for this filtered view or the cache
        # is stale.

        cl = repo.changelog
        filtername = repo.filtername
        bcache = self._per_filter.get(filtername)
        if bcache is None or not bcache.validfor(repo):
            # cache object missing or cache object stale? Read from disk
            bcache = branchcache.fromfile(repo)

        revs = []
        if bcache is None:
            # no (fresh) cache available anymore, perhaps we can re-use
            # the cache for a subset, then extend that to add info on missing
            # revisions.
            subsetname = subsettable.get(filtername)
            if subsetname is not None:
                # Recurse via self[...] so the subset cache is itself fresh,
                # then copy it so we don't mutate the subset's cache object.
                subset = repo.filtered(subsetname)
                bcache = self[subset].copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
            else:
                # nothing to fall back on, start empty.
                bcache = branchcache()

        # process any revision newer than what the cache already covers
        revs.extend(cl.revs(start=bcache.tiprev + 1))
        if revs:
            bcache.update(repo, revs)

        assert bcache.validfor(repo), filtername
        self._per_filter[repo.filtername] = bcache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = []
        for bheads in remotebranchmap.itervalues():
            rbheads += bheads
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                if c:
                    closed.append(h)

        if rbheads:
            # tip of the received map is the highest-revision head
            rtiprev = max((int(clrev(node)) for node in rbheads))
            cache = branchcache(
                remotebranchmap, repo[rtiprev].node(), rtiprev,
                closednodes=closed)

            # Try to stick it as low as possible
            # filter above served are unlikely to be fetch from a clone
            for candidate in ('base', 'immutable', 'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    self._per_filter[candidate] = cache
                    cache.write(rview)
                    return

    def clear(self):
        # drop all per-filter caches; they will be rebuilt on next access
        self._per_filter.clear()
128 128
def _unknownnode(node):
    """Raise ValueError for a node the branchcache references but the
    changelog does not contain."""
    msg = r'node %s does not exist' % pycompat.sysstr(hex(node))
    raise ValueError(msg)
133 133
class branchcache(object):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None, hasnode=None):
        """ hasnode is a function which can be used to verify whether changelog
        has a given node or not. If it's not provided, we assume that every node
        we have exists in changelog """
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        self._entries = dict(entries)
        # whether closed nodes are verified or not
        self._closedverified = False
        # branches for which nodes are verified
        self._verifiedbranches = set()
        self._hasnode = hasnode
        if self._hasnode is None:
            self._hasnode = lambda x: True

    def _verifyclosed(self):
        """ verify the closed nodes we have """
        if self._closedverified:
            return
        for node in self._closednodes:
            if not self._hasnode(node):
                _unknownnode(node)

        self._closedverified = True

    def _verifybranch(self, branch):
        """ verify head nodes for the given branch. If branch is None, verify
        for all the branches """
        if branch not in self._entries or branch in self._verifiedbranches:
            return
        for n in self._entries[branch]:
            if not self._hasnode(n):
                _unknownnode(n)

        self._verifiedbranches.add(branch)

    def _verifyall(self):
        """ verifies nodes of all the branches """
        for b in self._entries:
            self._verifybranch(b)

    def __iter__(self):
        return iter(self._entries)

    def __setitem__(self, key, value):
        self._entries[key] = value

    def __getitem__(self, key):
        self._verifybranch(key)
        return self._entries[key]

    def __contains__(self, key):
        self._verifybranch(key)
        return key in self._entries

    def iteritems(self):
        self._verifyall()
        return self._entries.iteritems()

    def hasbranch(self, label):
        """ checks whether a branch of this name exists or not """
        self._verifybranch(label)
        return label in self._entries

    @classmethod
    def fromfile(cls, repo):
        """Build a branchcache from the on-disk cache file.

        Returns None when the file is unreadable, and a (possibly empty)
        branchcache otherwise. Any parse/validation error is logged at
        debug level and also yields None."""
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            # first line: "<tip hex> <tip rev> [filtered hash]"
            cachekey = next(lineiter).rstrip('\n').split(" ", 2)
            last, lrev = cachekey[:2]
            last, lrev = bin(last), int(lrev)
            filteredhash = None
            hasnode = repo.changelog.hasnode
            if len(cachekey) > 2:
                filteredhash = bin(cachekey[2])
            bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash,
                         hasnode=hasnode)
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError(r'tip differs')
            bcache.load(repo, lineiter)
        except (IOError, OSError):
            return None

        except Exception as inst:
            if repo.ui.debugflag:
                msg = 'invalid branchheads cache'
                if repo.filtername is not None:
                    msg += ' (%s)' % repo.filtername
                msg += ': %s\n'
                repo.ui.debug(msg % pycompat.bytestr(inst))
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    def load(self, repo, lineiter):
        """ fully loads the branchcache by reading from the file using the line
        iterator passed"""
        for line in lineiter:
            line = line.rstrip('\n')
            if not line:
                continue
            # each line: "<hex node> <o|c> <branch name>"
            node, state, label = line.split(" ", 2)
            if state not in 'oc':
                raise ValueError(r'invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            self._entries.setdefault(label, []).append(node)
            if state == 'c':
                self._closednodes.add(node)

    @staticmethod
    def _filename(repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = "branch2"
        if repo.filtername:
            filename = '%s-%s' % (filename, repo.filtername)
        return filename

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash ==
                         scmutil.filteredhash(repo, self.tiprev)))
        except IndexError:
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        # Only membership in _closednodes is consulted here; verifying every
        # closed node in the cache on each call (as a previous version did via
        # _verifyclosed()) is unnecessary work and defeats the lazy,
        # per-branch verification scheme of this class.
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        self._verifybranch(branch)
        heads = self._entries[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """ returns all the heads """
        self._verifyall()
        return self._entries.itervalues()

    def copy(self):
        """return an deep copy of the branchcache object"""
        self._verifyall()
        return type(self)(
            self._entries, self.tipnode, self.tiprev, self.filteredhash,
            self._closednodes)

    def write(self, repo):
        """Persist the cache to disk; failures are logged and ignored."""
        try:
            f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self.iteritems()):
                label = encoding.fromlocal(label)
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state, label))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self._entries), nodecount)
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug("couldn't write branch cache: %s\n" %
                          stringutil.forcebytestr(inst))

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self._entries.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This have been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.iterheads():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername or b'None', duration)

        self.write(repo)
435 434
436 435
class remotebranchcache(branchcache):
    """Branchmap info for a remote connection, should not write locally"""

    def write(self, repo):
        # Deliberate no-op: data obtained from a remote peer must never be
        # persisted into the local cache directory.
        pass
441 440
442 441
443 442 # Revision branch info cache
444 443
445 444 _rbcversion = '-v1'
446 445 _rbcnames = 'rbc-names' + _rbcversion
447 446 _rbcrevs = 'rbc-revs' + _rbcversion
448 447 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
449 448 _rbcrecfmt = '>4sI'
450 449 _rbcrecsize = calcsize(_rbcrecfmt)
451 450 _rbcnodelen = 4
452 451 _rbcbranchidxmask = 0x7fffffff
453 452 _rbccloseflag = 0x80000000
454 453
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs will thus still give the right result
    while converging towards full recovery ... assuming no incorrectly matching
    node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        # this cache operates below filtering, so only the unfiltered repo
        # may own one
        assert repo.filtername is None
        self._repo = repo
        self._names = [] # branch names in local encoding with static index
        self._rbcrevs = bytearray()
        self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata) # for verification before writing
            if bndata:
                self._names = [encoding.tolocal(bn)
                               for bn in bndata.split('\0')]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              stringutil.forcebytestr(inst))
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names) # number of names read at
                                               # _rbcsnameslen

    def _clear(self):
        # reset in-memory state; rbc-revs is re-sized to cover the full
        # changelog with zeroed (i.e. "unknown") records
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
        util.clearcachedproperty(self, '_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        # lazily-built reverse map: branch name -> index in self._names
        return dict((b, r) for r, b in enumerate(self._names))

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == '\0\0\0\0':
            # zeroed record: never written, fall through to the slow path
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug("referenced branch names not found"
                    " - rebuilding revision branch cache from scratch\n")
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug("history modification detected - truncating "
                "revision branch cache to revision %d\n" % rev)
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first time we see this branch: assign it the next index
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, branch, rev, node, close):
        """add new data information to the cache"""
        if branch in self._namesreverse:
            branchidx = self._namesreverse[branch]
        else:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, node, branchidx)
        # If no cache data were readable (non exists, bad permission, etc)
        # the cache was bypassing itself by setting:
        #
        #   self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if r'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            # grow the buffer with zeroed records up to the changelog length
            self._rbcrevs.extend('\0' *
                                 (len(self._repo.changelog) * _rbcrecsize -
                                  len(self._rbcrevs)))
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        # mark everything from this rev onward as dirty for the next write()
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize('write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = ''
        try:
            if self._rbcnamescount < len(self._names):
                # new branch names were added since the last write
                step = ' names'
                wlock = repo.wlock(wait=False)
                if self._rbcnamescount != 0:
                    f = repo.cachevfs.open(_rbcnames, 'ab')
                    # only append when the on-disk file still matches what we
                    # read; otherwise rewrite both files from scratch
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    # before rewriting names, make sure references are removed
                    repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                    f = repo.cachevfs.open(_rbcnames, 'wb')
                f.write('\0'.join(encoding.fromlocal(b)
                                  for b in self._names[self._rbcnamescount:]))
                self._rbcsnameslen = f.tell()
                f.close()
                self._rbcnamescount = len(self._names)

            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                # there are dirty records beyond 'start' to flush
                step = ''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                revs = min(len(repo.changelog),
                           len(self._rbcrevs) // _rbcrecsize)
                f = repo.cachevfs.open(_rbcrevs, 'ab')
                if f.tell() != start:
                    # on-disk file diverged; truncate (or rewrite from 0 if
                    # even seeking to 'start' is impossible)
                    repo.ui.debug("truncating cache/%s to %d\n"
                                  % (_rbcrevs, start))
                    f.seek(start)
                    if f.tell() != start:
                        start = 0
                        f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
                self._rbcrevslen = revs
        except (IOError, OSError, error.Abort, error.LockError) as inst:
            # best effort: a read-only or locked repo just skips the write
            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                          % (step, stringutil.forcebytestr(inst)))
        finally:
            if wlock is not None:
                wlock.release()
General Comments 0
You need to be logged in to leave comments. Login now