##// END OF EJS Templates
branchcache: don't verify closed nodes in iteropen()...
Pulkit Goyal -
r42676:7c9d4cf2 default
parent child Browse files
Show More
@@ -1,670 +1,669 b''
1 1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11
12 12 from .node import (
13 13 bin,
14 14 hex,
15 15 nullid,
16 16 nullrev,
17 17 )
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 pycompat,
22 22 scmutil,
23 23 util,
24 24 )
25 25 from .utils import (
26 26 stringutil,
27 27 )
28 28
# Bind struct helpers to module-level names: these run in the hot
# (de)serialization path of the rev-branch cache below.
calcsize = struct.calcsize
pack_into = struct.pack_into
unpack_from = struct.unpack_from


### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes;
# the ordering may be partial.
subsettable = {None: 'visible',
               'visible-hidden': 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}
45 45
46 46
class BranchMapCache(object):
    """mapping of filtered views of repo with their branchcache"""
    def __init__(self):
        # filter name (e.g. None, 'visible', 'served') -> branchcache instance
        self._per_filter = {}

    def __getitem__(self, repo):
        """Return an up-to-date branchcache for repo's current filtered view."""
        self.updatecache(repo)
        return self._per_filter[repo.filtername]

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # This can trigger updates for the caches for subsets of the filtered
        # view, e.g. when there is no cache for this filtered view or the cache
        # is stale.

        cl = repo.changelog
        filtername = repo.filtername
        bcache = self._per_filter.get(filtername)
        if bcache is None or not bcache.validfor(repo):
            # cache object missing or cache object stale? Read from disk
            bcache = branchcache.fromfile(repo)

        revs = []
        if bcache is None:
            # no (fresh) cache available anymore, perhaps we can re-use
            # the cache for a subset, then extend that to add info on missing
            # revisions.
            subsetname = subsettable.get(filtername)
            if subsetname is not None:
                subset = repo.filtered(subsetname)
                # recurses through __getitem__ to build the subset's cache
                bcache = self[subset].copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
            else:
                # nothing to fall back on, start empty.
                bcache = branchcache()

        # extend the (possibly re-used) cache up to the current tip
        revs.extend(cl.revs(start=bcache.tiprev + 1))
        if revs:
            bcache.update(repo, revs)

        assert bcache.validfor(repo), filtername
        self._per_filter[repo.filtername] = bcache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = []
        # flatten the remote map and record which heads close their branch
        for bheads in remotebranchmap.itervalues():
            rbheads += bheads
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                if c:
                    closed.append(h)

        if rbheads:
            rtiprev = max((int(clrev(node)) for node in rbheads))
            cache = branchcache(
                remotebranchmap, repo[rtiprev].node(), rtiprev,
                closednodes=closed)

            # Try to stick it as low as possible
            # filter above served are unlikely to be fetch from a clone
            for candidate in ('base', 'immutable', 'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    self._per_filter[candidate] = cache
                    cache.write(rview)
                    return

    def clear(self):
        """Drop every per-filter branchcache held in memory."""
        self._per_filter.clear()
128 128
def _unknownnode(node):
    """Signal that the branchcache referenced a node absent from the changelog.

    Always raises ValueError; never returns normally.
    """
    hexnode = pycompat.sysstr(hex(node))
    raise ValueError(r'node %s does not exist' % hexnode)
133 133
class branchcache(object):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None, hasnode=None):
        """ hasnode is a function which can be used to verify whether changelog
        has a given node or not. If it's not provided, we assume that every node
        we have exists in changelog """
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        self._entries = dict(entries)
        # whether closed nodes are verified or not
        self._closedverified = False
        # branches for which nodes are verified
        self._verifiedbranches = set()
        self._hasnode = hasnode
        if self._hasnode is None:
            # no verifier provided: assume every cached node exists
            self._hasnode = lambda x: True

    def _verifyclosed(self):
        """ verify the closed nodes we have """
        if self._closedverified:
            return
        for node in self._closednodes:
            if not self._hasnode(node):
                # raises ValueError for a node the changelog does not have
                _unknownnode(node)

        self._closedverified = True

    def _verifybranch(self, branch):
        """ verify head nodes for the given branch. If branch is None, verify
        for all the branches """
        if branch not in self._entries or branch in self._verifiedbranches:
            return
        for n in self._entries[branch]:
            if not self._hasnode(n):
                _unknownnode(n)

        self._verifiedbranches.add(branch)

    def _verifyall(self):
        """ verifies nodes of all the branches """
        for b in self._entries:
            self._verifybranch(b)

    def __iter__(self):
        # iteration over branch names needs no node verification
        return iter(self._entries)

    def __setitem__(self, key, value):
        self._entries[key] = value

    def __getitem__(self, key):
        # verify lazily, only for the branch being accessed
        self._verifybranch(key)
        return self._entries[key]

    def __contains__(self, key):
        self._verifybranch(key)
        return key in self._entries

    def iteritems(self):
        # exposing heads of every branch requires verifying all of them
        self._verifyall()
        return self._entries.iteritems()

    def hasbranch(self, label):
        """ checks whether a branch of this name exists or not """
        self._verifybranch(label)
        return label in self._entries

    @classmethod
    def fromfile(cls, repo):
        """Load a branchcache from the on-disk cache file.

        Returns None when the file is unreadable or its content is invalid
        for this repo (e.g. cache key mismatch, corrupt line).
        """
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            # first line is the cache key: tip node, tip rev, optional hash
            cachekey = next(lineiter).rstrip('\n').split(" ", 2)
            last, lrev = cachekey[:2]
            last, lrev = bin(last), int(lrev)
            filteredhash = None
            hasnode = repo.changelog.hasnode
            if len(cachekey) > 2:
                filteredhash = bin(cachekey[2])
            bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash,
                         hasnode=hasnode)
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError(r'tip differs')
            bcache.load(repo, lineiter)
        except (IOError, OSError):
            return None

        except Exception as inst:
            if repo.ui.debugflag:
                msg = 'invalid branchheads cache'
                if repo.filtername is not None:
                    msg += ' (%s)' % repo.filtername
                msg += ': %s\n'
                repo.ui.debug(msg % pycompat.bytestr(inst))
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    def load(self, repo, lineiter):
        """ fully loads the branchcache by reading from the file using the line
        iterator passed"""
        for line in lineiter:
            line = line.rstrip('\n')
            if not line:
                continue
            # each line: <hex node> <'o'|'c'> <branch name>
            node, state, label = line.split(" ", 2)
            if state not in 'oc':
                raise ValueError(r'invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            self._entries.setdefault(label, []).append(node)
            if state == 'c':
                self._closednodes.add(node)

    @staticmethod
    def _filename(repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = "branch2"
        if repo.filtername:
            filename = '%s-%s' % (filename, repo.filtername)
        return filename

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash ==
                         scmutil.filteredhash(repo, self.tiprev)))
        except IndexError:
            # tiprev no longer exists in the changelog (e.g. after a strip)
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        self._verifyclosed()
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        # Closed-node verification is deliberately skipped here: a plain
        # membership test against _closednodes is enough, since a node that
        # no longer exists simply won't be in the incoming 'nodes'.
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        """Return the heads of 'branch'; closed heads included only if
        'closed' is True. Raises KeyError for an unknown branch."""
        self._verifybranch(branch)
        heads = self._entries[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        # yields (branchname, heads, tipnode, isclosed) tuples
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """ returns all the heads """
        self._verifyall()
        return self._entries.itervalues()

    def copy(self):
        """return an deep copy of the branchcache object"""
        self._verifyall()
        return type(self)(
            self._entries, self.tipnode, self.tiprev, self.filteredhash,
            self._closednodes)

    def write(self, repo):
        """Serialize this cache to the repo's cache file (best effort:
        write failures are logged, not raised)."""
        try:
            f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self.iteritems()):
                label = encoding.fromlocal(label)
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state, label))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self._entries), nodecount)
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug("couldn't write branch cache: %s\n" %
                          stringutil.forcebytestr(inst))

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self._entries.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This have been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.iterheads():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername or b'None', duration)

        self.write(repo)
437 436
class remotebranchcache(branchcache):
    """Branchmap info for a remote connection, should not write locally"""

    def write(self, repo):
        # Deliberately a no-op: data learned from a remote must never
        # overwrite this repository's on-disk branch cache.
        return None
442 441
443 442
# Revision branch info cache

_rbcversion = '-v1'
# cache file holding branch names in internal encoding, '\0'-separated
_rbcnames = 'rbc-names' + _rbcversion
# cache file holding one fixed-size record per revision
_rbcrevs = 'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
_rbcrecfmt = '>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
# number of node-hash bytes kept per record for validation
_rbcnodelen = 4
# lower 31 bits of the second record field: index into the names list
_rbcbranchidxmask = 0x7fffffff
# high bit set when the revision closes its branch
_rbccloseflag = 0x80000000
455 454
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs fill thus still give the right result
    while converging towards full recovery ... assuming no incorrectly matching
    node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        # this cache operates on the unfiltered repo only
        assert repo.filtername is None
        self._repo = repo
        self._names = [] # branch names in local encoding with static index
        self._rbcrevs = bytearray()
        self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata) # for verification before writing
            if bndata:
                self._names = [encoding.tolocal(bn)
                               for bn in bndata.split('\0')]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              stringutil.forcebytestr(inst))
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names) # number of names read at
                                               # _rbcsnameslen

    def _clear(self):
        """Reset in-memory state, discarding all cached records and names."""
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
        util.clearcachedproperty(self, '_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        # branch name -> index, inverse of self._names
        return dict((b, r) for r, b in enumerate(self._names))

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == '\0\0\0\0':
            # record never written: fall through to the slow path below
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug("referenced branch names not found"
                    " - rebuilding revision branch cache from scratch\n")
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug("history modification detected - truncating "
                "revision branch cache to revision %d\n" % rev)
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first time we see this branch name: append to names list
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, branch, rev, node, close):
        """add new data information to the cache"""
        if branch in self._namesreverse:
            branchidx = self._namesreverse[branch]
        else:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, node, branchidx)
        # If no cache data were readable (non exists, bad permission, etc)
        # the cache was bypassing itself by setting:
        #
        #   self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if r'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            # grow the buffer with zero records up to the changelog length
            self._rbcrevs.extend('\0' *
                                 (len(self._repo.changelog) * _rbcrecsize -
                                  len(self._rbcrevs)))
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        # everything from this rev onward is now dirty relative to disk
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize('write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = ''
        try:
            if self._rbcnamescount < len(self._names):
                step = ' names'
                wlock = repo.wlock(wait=False)
                if self._rbcnamescount != 0:
                    f = repo.cachevfs.open(_rbcnames, 'ab')
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        # on-disk names diverged from what we read: rewrite
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    # before rewriting names, make sure references are removed
                    repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                    f = repo.cachevfs.open(_rbcnames, 'wb')
                f.write('\0'.join(encoding.fromlocal(b)
                                  for b in self._names[self._rbcnamescount:]))
                self._rbcsnameslen = f.tell()
                f.close()
                self._rbcnamescount = len(self._names)

            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = ''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                revs = min(len(repo.changelog),
                           len(self._rbcrevs) // _rbcrecsize)
                f = repo.cachevfs.open(_rbcrevs, 'ab')
                if f.tell() != start:
                    repo.ui.debug("truncating cache/%s to %d\n"
                                  % (_rbcrevs, start))
                    f.seek(start)
                    if f.tell() != start:
                        # seek failed to land where expected: rewrite from 0
                        start = 0
                        f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
                self._rbcrevslen = revs
        except (IOError, OSError, error.Abort, error.LockError) as inst:
            # best effort: a cache that cannot be written is just skipped
            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                          % (step, stringutil.forcebytestr(inst)))
        finally:
            if wlock is not None:
                wlock.release()
General Comments 0
You need to be logged in to leave comments. Login now