##// END OF EJS Templates
branchcache: lazily validate nodes in iteritems()...
Pulkit Goyal -
r42303:f0203c34 default
parent child Browse files
Show More
@@ -1,668 +1,669 b''
1 1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11
12 12 from .node import (
13 13 bin,
14 14 hex,
15 15 nullid,
16 16 nullrev,
17 17 )
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 pycompat,
22 22 scmutil,
23 23 util,
24 24 )
25 25 from .utils import (
26 26 stringutil,
27 27 )
28 28
29 29 calcsize = struct.calcsize
30 30 pack_into = struct.pack_into
31 31 unpack_from = struct.unpack_from
32 32
33 33
34 34 ### Nearest subset relation
35 35 # Nearest subset of filter X is a filter Y so that:
36 36 # * Y is included in X,
37 37 # * X - Y is as small as possible.
38 38 # This create and ordering used for branchmap purpose.
39 39 # the ordering may be partial
40 40 subsettable = {None: 'visible',
41 41 'visible-hidden': 'visible',
42 42 'visible': 'served',
43 43 'served.hidden': 'served',
44 44 'served': 'immutable',
45 45 'immutable': 'base'}
46 46
47 47
class BranchMapCache(object):
    """mapping of filtered views of repo with their branchcache"""
    def __init__(self):
        # maps repo filter name (str or None) -> branchcache instance
        self._per_filter = {}

    def __getitem__(self, repo):
        # refresh the cache for this filtered view before handing it out
        self.updatecache(repo)
        return self._per_filter[repo.filtername]

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # This can trigger updates for the caches for subsets of the filtered
        # view, e.g. when there is no cache for this filtered view or the cache
        # is stale.

        cl = repo.changelog
        filtername = repo.filtername
        bcache = self._per_filter.get(filtername)
        if bcache is None or not bcache.validfor(repo):
            # cache object missing or cache object stale? Read from disk
            bcache = branchcache.fromfile(repo)

        revs = []
        if bcache is None:
            # no (fresh) cache available anymore, perhaps we can re-use
            # the cache for a subset, then extend that to add info on missing
            # revisions.
            subsetname = subsettable.get(filtername)
            if subsetname is not None:
                # NOTE: self[subset] recurses into updatecache for the subset
                # view first, so its cache is fresh before we copy it
                subset = repo.filtered(subsetname)
                bcache = self[subset].copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
            else:
                # nothing to fall back on, start empty.
                bcache = branchcache()

        # extend the cache with everything past its recorded tip
        revs.extend(cl.revs(start=bcache.tiprev + 1))
        if revs:
            bcache.update(repo, revs)

        assert bcache.validfor(repo), filtername
        self._per_filter[repo.filtername] = bcache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = []
        for bheads in remotebranchmap.itervalues():
            rbheads += bheads
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                # remember heads whose commit closes their branch
                if c:
                    closed.append(h)

        if rbheads:
            rtiprev = max((int(clrev(node)) for node in rbheads))
            cache = branchcache(
                remotebranchmap, repo[rtiprev].node(), rtiprev,
                closednodes=closed)

            # Try to stick it as low as possible
            # filter above served are unlikely to be fetch from a clone
            for candidate in ('base', 'immutable', 'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    self._per_filter[candidate] = cache
                    cache.write(rview)
                    return

    def clear(self):
        # drop all per-filter caches; they will be rebuilt on next access
        self._per_filter.clear()
129 129
def _unknownnode(node):
    """Raise ValueError for a node the branchcache references but the
    changelog does not contain."""
    message = r'node %s does not exist' % pycompat.sysstr(hex(node))
    raise ValueError(message)
134 134
class branchcache(object):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None, hasnode=None):
        """ hasnode is a function which can be used to verify whether changelog
        has a given node or not. If it's not provided, we assume that every node
        we have exists in changelog """
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        # branch name -> list of head nodes
        self._entries = dict(entries)
        # whether closed nodes are verified or not
        self._closedverified = False
        # branches for which nodes are verified
        self._verifiedbranches = set()
        self._hasnode = hasnode
        if self._hasnode is None:
            # no verifier supplied: trust every node unconditionally
            self._hasnode = lambda x: True

    def _verifyclosed(self):
        """ verify the closed nodes we have """
        if self._closedverified:
            return
        for node in self._closednodes:
            if not self._hasnode(node):
                _unknownnode(node)

        self._closedverified = True

    def _verifybranch(self, branch):
        """ verify head nodes for the given branch. """
        # no-op for unknown branches and for branches already verified
        if branch not in self._entries or branch in self._verifiedbranches:
            return
        for n in self._entries[branch]:
            if not self._hasnode(n):
                _unknownnode(n)

        self._verifiedbranches.add(branch)

    def _verifyall(self):
        """ verifies nodes of all the branches """
        needverification = set(self._entries.keys()) - self._verifiedbranches
        for b in needverification:
            self._verifybranch(b)

    def __iter__(self):
        return iter(self._entries)

    def __setitem__(self, key, value):
        self._entries[key] = value

    def __getitem__(self, key):
        self._verifybranch(key)
        return self._entries[key]

    def __contains__(self, key):
        self._verifybranch(key)
        return key in self._entries

    def iteritems(self):
        # generator: nodes of each branch are validated lazily, only when
        # that branch's item is actually consumed by the caller
        for k, v in self._entries.iteritems():
            self._verifybranch(k)
            yield k, v

    def hasbranch(self, label):
        """ checks whether a branch of this name exists or not """
        self._verifybranch(label)
        return label in self._entries

    @classmethod
    def fromfile(cls, repo):
        # Build a branchcache from the on-disk cache file.
        # Returns None when the file is unreadable or its content invalid.
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            # first line: cache key "<tipnode> <tiprev> [filteredhash]"
            cachekey = next(lineiter).rstrip('\n').split(" ", 2)
            last, lrev = cachekey[:2]
            last, lrev = bin(last), int(lrev)
            filteredhash = None
            hasnode = repo.changelog.hasnode
            if len(cachekey) > 2:
                filteredhash = bin(cachekey[2])
            bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash,
                         hasnode=hasnode)
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError(r'tip differs')
            bcache.load(repo, lineiter)
        except (IOError, OSError):
            return None

        except Exception as inst:
            if repo.ui.debugflag:
                msg = 'invalid branchheads cache'
                if repo.filtername is not None:
                    msg += ' (%s)' % repo.filtername
                msg += ': %s\n'
                repo.ui.debug(msg % pycompat.bytestr(inst))
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    def load(self, repo, lineiter):
        """ fully loads the branchcache by reading from the file using the line
        iterator passed"""
        for line in lineiter:
            line = line.rstrip('\n')
            if not line:
                continue
            node, state, label = line.split(" ", 2)
            if state not in 'oc':
                raise ValueError(r'invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            self._entries.setdefault(label, []).append(node)
            if state == 'c':
                self._closednodes.add(node)

    @staticmethod
    def _filename(repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = "branch2"
        if repo.filtername:
            filename = '%s-%s' % (filename, repo.filtername)
        return filename

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash ==
                         scmutil.filteredhash(repo, self.tiprev)))
        except IndexError:
            # tiprev no longer exists in the changelog (e.g. after a strip)
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        # filter out heads that close their branch
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        self._verifybranch(branch)
        heads = self._entries[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        # yields (branchname, heads, tipnode, isclosed) tuples
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """ returns all the heads """
        self._verifyall()
        return self._entries.itervalues()

    def copy(self):
        """return an deep copy of the branchcache object"""
        # NOTE(review): verification state (_verifiedbranches, hasnode) is
        # deliberately not carried over; the copy starts unverified
        return type(self)(
            self._entries, self.tipnode, self.tiprev, self.filteredhash,
            self._closednodes)

    def write(self, repo):
        try:
            f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self.iteritems()):
                label = encoding.fromlocal(label)
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state, label))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self._entries), nodecount)
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug("couldn't write branch cache: %s\n" %
                          stringutil.forcebytestr(inst))

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self._entries.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This have been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            # recompute the tip from the surviving heads
            for heads in self.iterheads():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername or b'None', duration)

        self.write(repo)
434 435
435 436
class remotebranchcache(branchcache):
    """A branchcache holding branchmap data received from a remote peer.

    Remote data must never overwrite the local on-disk branchmap cache,
    so persisting is disabled.
    """

    def write(self, repo):
        # intentionally a no-op: never write remote branchmap data locally
        pass
440 441
441 442
# Revision branch info cache

# version suffix appended to both cache file names below
_rbcversion = '-v1'
# file holding the '\0'-separated branch names (append-only)
_rbcnames = 'rbc-names' + _rbcversion
# file holding one fixed-size record per revision
_rbcrevs = 'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
_rbcrecfmt = '>4sI'
# size in bytes of one rbc-revs record (8 bytes for '>4sI')
_rbcrecsize = calcsize(_rbcrecfmt)
# number of leading node-hash bytes stored per record
_rbcnodelen = 4
# low 31 bits of the record's integer field: index into the names list
_rbcbranchidxmask = 0x7fffffff
# high bit: set when the revision closes its branch
_rbccloseflag = 0x80000000
453 454
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs fill thus still give the right result
    while converging towards full recovery ... assuming no incorrectly matching
    node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        assert repo.filtername is None
        self._repo = repo
        self._names = [] # branch names in local encoding with static index
        # raw rbc-revs file content, mutated in place as records are cached
        self._rbcrevs = bytearray()
        self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata) # for verification before writing
            if bndata:
                self._names = [encoding.tolocal(bn)
                               for bn in bndata.split('\0')]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              stringutil.forcebytestr(inst))
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            # no usable records: the name list is meaningless too
            self._names = []
        self._rbcnamescount = len(self._names) # number of names read at
                                               # _rbcsnameslen

    def _clear(self):
        """Reset all in-memory cache state; everything will be recomputed."""
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
        util.clearcachedproperty(self, '_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        # reverse mapping: branch name -> its index in self._names
        return dict((b, r) for r, b in enumerate(self._names))

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == '\0\0\0\0':
            # all-zero hash prefix marks an unpopulated record
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug("referenced branch names not found"
                    " - rebuilding revision branch cache from scratch\n")
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug("history modification detected - truncating "
                "revision branch cache to revision %d\n" % rev)
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first time this branch name is seen: append it to the list
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, branch, rev, node, close):
        """add new data information to the cache"""
        if branch in self._namesreverse:
            branchidx = self._namesreverse[branch]
        else:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, node, branchidx)
        # If no cache data were readable (non exists, bad permission, etc)
        # the cache was bypassing itself by setting:
        #
        #   self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if r'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            # grow the buffer with zero records up to the changelog length
            self._rbcrevs.extend('\0' *
                                 (len(self._repo.changelog) * _rbcrecsize -
                                  len(self._rbcrevs)))
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        # mark everything from this rev on as dirty (not yet on disk)
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            # defer the actual write until the transaction finalizes
            tr.addfinalize('write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = ''
        try:
            # step 1: persist any branch names added since the last write
            if self._rbcnamescount < len(self._names):
                step = ' names'
                wlock = repo.wlock(wait=False)
                if self._rbcnamescount != 0:
                    f = repo.cachevfs.open(_rbcnames, 'ab')
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        # on-disk file diverged from what we read: rewrite all
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    # before rewriting names, make sure references are removed
                    repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                    f = repo.cachevfs.open(_rbcnames, 'wb')
                f.write('\0'.join(encoding.fromlocal(b)
                                  for b in self._names[self._rbcnamescount:]))
                self._rbcsnameslen = f.tell()
                f.close()
                self._rbcnamescount = len(self._names)

            # step 2: append/overwrite the dirty tail of the records file
            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = ''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                revs = min(len(repo.changelog),
                           len(self._rbcrevs) // _rbcrecsize)
                f = repo.cachevfs.open(_rbcrevs, 'ab')
                if f.tell() != start:
                    repo.ui.debug("truncating cache/%s to %d\n"
                                  % (_rbcrevs, start))
                    f.seek(start)
                    if f.tell() != start:
                        # seek failed to land where expected: rewrite all
                        start = 0
                        f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
                self._rbcrevslen = revs
        except (IOError, OSError, error.Abort, error.LockError) as inst:
            # best effort: a cache write failure must never abort the caller
            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                          % (step, stringutil.forcebytestr(inst)))
        finally:
            if wlock is not None:
                wlock.release()
General Comments 0
You need to be logged in to leave comments. Login now