##// END OF EJS Templates
branchcache: don't verify all nodes while writing...
Pulkit Goyal -
r42361:07faf5c6 stable
parent child Browse files
Show More
@@ -1,658 +1,658 b''
1 1 # branchmap.py - logic to compute, maintain and store the branchmap for a local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11
12 12 from .node import (
13 13 bin,
14 14 hex,
15 15 nullid,
16 16 nullrev,
17 17 )
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 pycompat,
22 22 scmutil,
23 23 util,
24 24 )
25 25 from .utils import (
26 26 repoviewutil,
27 27 stringutil,
28 28 )
29 29
# Table mapping a repoview filter name to the name of the filter it is a
# subset of; used by BranchMapCache.updatecache to reuse a subset's cache.
subsettable = repoviewutil. subsettable

# Local aliases for the struct helpers used by the revbranchcache record
# packing/unpacking below.
calcsize = struct.calcsize
pack_into = struct.pack_into
unpack_from = struct.unpack_from
36 36
class BranchMapCache(object):
    """mapping of filtered views of repo with their branchcache"""
    def __init__(self):
        # {filtername: branchcache} -- one cache object per repoview
        self._per_filter = {}

    def __getitem__(self, repo):
        """Return an up-to-date branchcache for *repo*'s filtered view."""
        self.updatecache(repo)
        return self._per_filter[repo.filtername]

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # This can trigger updates for the caches for subsets of the filtered
        # view, e.g. when there is no cache for this filtered view or the cache
        # is stale.

        cl = repo.changelog
        filtername = repo.filtername
        bcache = self._per_filter.get(filtername)
        if bcache is None or not bcache.validfor(repo):
            # cache object missing or cache object stale? Read from disk
            bcache = branchcache.fromfile(repo)

        revs = []
        if bcache is None:
            # no (fresh) cache available anymore, perhaps we can re-use
            # the cache for a subset, then extend that to add info on missing
            # revisions.
            subsetname = subsettable.get(filtername)
            if subsetname is not None:
                # recursive __getitem__ ensures the subset's cache is fresh
                subset = repo.filtered(subsetname)
                bcache = self[subset].copy()
                # revisions visible here but filtered out in the subset still
                # need to be folded into the copied cache
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
            else:
                # nothing to fall back on, start empty.
                bcache = branchcache()

        revs.extend(cl.revs(start=bcache.tiprev + 1))
        if revs:
            bcache.update(repo, revs)

        assert bcache.validfor(repo), filtername
        self._per_filter[repo.filtername] = bcache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = []
        for bheads in remotebranchmap.itervalues():
            rbheads += bheads
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                if c:
                    closed.append(h)

        if rbheads:
            rtiprev = max((int(clrev(node)) for node in rbheads))
            cache = branchcache(
                remotebranchmap, repo[rtiprev].node(), rtiprev,
                closednodes=closed)

            # Try to stick it as low as possible
            # filter above served are unlikely to be fetch from a clone
            for candidate in ('base', 'immutable', 'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    self._per_filter[candidate] = cache
                    cache.write(rview)
                    return

    def clear(self):
        """Drop every cached branchcache object."""
        self._per_filter.clear()
118 118
def _unknownnode(node):
    """Raise ValueError because the branchcache referenced *node*, which is
    not present in the changelog."""
    msg = r'node %s does not exist' % pycompat.sysstr(hex(node))
    raise ValueError(msg)
123 123
class branchcache(object):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None, hasnode=None):
        """ hasnode is a function which can be used to verify whether changelog
        has a given node or not. If it's not provided, we assume that every node
        we have exists in changelog """
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        # {branchname: [head nodes]} -- node lists are verified lazily, on
        # first access of each branch (see _verifybranch)
        self._entries = dict(entries)
        # whether closed nodes are verified or not
        self._closedverified = False
        # branches for which nodes are verified
        self._verifiedbranches = set()
        self._hasnode = hasnode
        if self._hasnode is None:
            self._hasnode = lambda x: True

    def _verifyclosed(self):
        """ verify the closed nodes we have """
        if self._closedverified:
            return
        for node in self._closednodes:
            if not self._hasnode(node):
                _unknownnode(node)

        self._closedverified = True

    def _verifybranch(self, branch):
        """ verify head nodes for the given branch. """
        # unknown branches are fine here; membership checks happen elsewhere
        if branch not in self._entries or branch in self._verifiedbranches:
            return
        for n in self._entries[branch]:
            if not self._hasnode(n):
                _unknownnode(n)

        self._verifiedbranches.add(branch)

    def _verifyall(self):
        """ verifies nodes of all the branches """
        needverification = set(self._entries.keys()) - self._verifiedbranches
        for b in needverification:
            self._verifybranch(b)

    def __iter__(self):
        return iter(self._entries)

    def __setitem__(self, key, value):
        self._entries[key] = value

    def __getitem__(self, key):
        # verify on access so stale on-disk data surfaces as ValueError
        self._verifybranch(key)
        return self._entries[key]

    def __contains__(self, key):
        self._verifybranch(key)
        return key in self._entries

    def iteritems(self):
        # yields verified (branch, heads) pairs
        for k, v in self._entries.iteritems():
            self._verifybranch(k)
            yield k, v

    def hasbranch(self, label):
        """ checks whether a branch of this name exists or not """
        self._verifybranch(label)
        return label in self._entries

    @classmethod
    def fromfile(cls, repo):
        """Build a branchcache from the on-disk cache file.

        Returns None when the file is missing/unreadable or its content is
        invalid or stale for *repo*.
        """
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            # first line: "<tip hex> <tip rev> [filtered hash]"
            cachekey = next(lineiter).rstrip('\n').split(" ", 2)
            last, lrev = cachekey[:2]
            last, lrev = bin(last), int(lrev)
            filteredhash = None
            hasnode = repo.changelog.hasnode
            if len(cachekey) > 2:
                filteredhash = bin(cachekey[2])
            bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash,
                         hasnode=hasnode)
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError(r'tip differs')
            bcache.load(repo, lineiter)
        except (IOError, OSError):
            return None

        except Exception as inst:
            if repo.ui.debugflag:
                msg = 'invalid branchheads cache'
                if repo.filtername is not None:
                    msg += ' (%s)' % repo.filtername
                msg += ': %s\n'
                repo.ui.debug(msg % pycompat.bytestr(inst))
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    def load(self, repo, lineiter):
        """ fully loads the branchcache by reading from the file using the line
        iterator passed"""
        for line in lineiter:
            line = line.rstrip('\n')
            if not line:
                continue
            # each line: "<head hex node> <o|c> <branch name>"
            node, state, label = line.split(" ", 2)
            if state not in 'oc':
                raise ValueError(r'invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            self._entries.setdefault(label, []).append(node)
            if state == 'c':
                self._closednodes.add(node)

    @staticmethod
    def _filename(repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = "branch2"
        if repo.filtername:
            filename = '%s-%s' % (filename, repo.filtername)
        return filename

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash ==
                         scmutil.filteredhash(repo, self.tiprev)))
        except IndexError:
            # tiprev no longer exists (e.g. after a strip)
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        # filter out heads that close their branch
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        """Return the head nodes of *branch*, excluding closed heads unless
        *closed* is True. Raises KeyError for unknown branch."""
        self._verifybranch(branch)
        heads = self._entries[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        # yields (branch, heads, tipnode, isclosed) tuples
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """ returns all the heads """
        self._verifyall()
        return self._entries.itervalues()

    def copy(self):
        """return an deep copy of the branchcache object"""
        # NOTE(review): entries/closednodes are shared with dict()/set()
        # level copies via __init__, not recursively deep-copied
        return type(self)(
            self._entries, self.tipnode, self.tiprev, self.filteredhash,
            self._closednodes)

    def write(self, repo):
        """Serialize the cache to disk; errors are logged, not raised."""
        try:
            f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            # iterate _entries directly instead of self.iteritems(): writing
            # must not trigger the lazy per-branch node verification
            for label, nodes in sorted(self._entries.iteritems()):
                label = encoding.fromlocal(label)
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state, label))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self._entries), nodecount)
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug("couldn't write branch cache: %s\n" %
                          stringutil.forcebytestr(inst))

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self._entries.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This have been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.iterheads():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername or b'None', duration)

        self.write(repo)
425 425
class remotebranchcache(branchcache):
    """Branchmap info for a remote connection, should not write locally"""

    def write(self, repo):
        # Caches built from a remote peer are ephemeral by design; writing
        # them to the local cache directory would poison it, so this is a
        # deliberate no-op override of branchcache.write().
        pass
430 430
431 431
432 432 # Revision branch info cache
433 433
434 434 _rbcversion = '-v1'
435 435 _rbcnames = 'rbc-names' + _rbcversion
436 436 _rbcrevs = 'rbc-revs' + _rbcversion
437 437 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
438 438 _rbcrecfmt = '>4sI'
439 439 _rbcrecsize = calcsize(_rbcrecfmt)
440 440 _rbcnodelen = 4
441 441 _rbcbranchidxmask = 0x7fffffff
442 442 _rbccloseflag = 0x80000000
443 443
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs will thus still give the right result
    while converging towards full recovery ... assuming no incorrectly matching
    node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        assert repo.filtername is None
        self._repo = repo
        self._names = [] # branch names in local encoding with static index
        self._rbcrevs = bytearray()
        self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata) # for verification before writing
            if bndata:
                self._names = [encoding.tolocal(bn)
                               for bn in bndata.split('\0')]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              stringutil.forcebytestr(inst))
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names) # number of names read at
                                               # _rbcsnameslen

    def _clear(self):
        """Reset in-memory state to an empty, full-length record buffer."""
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
        util.clearcachedproperty(self, '_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        # {branch name: index in self._names}, built lazily
        return dict((b, r) for r, b in enumerate(self._names))

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == '\0\0\0\0':
            # record never written -- fall through to the slow path
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug("referenced branch names not found"
                    " - rebuilding revision branch cache from scratch\n")
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug("history modification detected - truncating "
                "revision branch cache to revision %d\n" % rev)
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first time we see this branch name: append it and index it
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, branch, rev, node, close):
        """add new data information to the cache"""
        if branch in self._namesreverse:
            branchidx = self._namesreverse[branch]
        else:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, node, branchidx)
        # If no cache data were readable (non exists, bad permission, etc)
        # the cache was bypassing itself by setting:
        #
        #   self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if r'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            # zero-fill the buffer up to the changelog length so pack_into
            # has room for this record
            self._rbcrevs.extend('\0' *
                                 (len(self._repo.changelog) * _rbcrecsize -
                                  len(self._rbcrevs)))
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        # everything from this rev upward is now dirty relative to disk
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize('write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = ''
        try:
            # flush any names appended since the last write
            if self._rbcnamescount < len(self._names):
                step = ' names'
                wlock = repo.wlock(wait=False)
                if self._rbcnamescount != 0:
                    f = repo.cachevfs.open(_rbcnames, 'ab')
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        # on-disk file diverged from what we read; rewrite all
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    # before rewriting names, make sure references are removed
                    repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                    f = repo.cachevfs.open(_rbcnames, 'wb')
                f.write('\0'.join(encoding.fromlocal(b)
                                  for b in self._names[self._rbcnamescount:]))
                self._rbcsnameslen = f.tell()
                f.close()
                self._rbcnamescount = len(self._names)

            # append (or truncate-then-append) the dirty tail of rbc-revs
            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = ''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                revs = min(len(repo.changelog),
                           len(self._rbcrevs) // _rbcrecsize)
                f = repo.cachevfs.open(_rbcrevs, 'ab')
                if f.tell() != start:
                    repo.ui.debug("truncating cache/%s to %d\n"
                                  % (_rbcrevs, start))
                    f.seek(start)
                    if f.tell() != start:
                        # seek failed (e.g. unseekable stream); rewrite fully
                        start = 0
                        f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
                self._rbcrevslen = revs
        except (IOError, OSError, error.Abort, error.LockError) as inst:
            # best-effort cache: failure to write is only worth a debug note
            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                          % (step, stringutil.forcebytestr(inst)))
        finally:
            if wlock is not None:
                wlock.release()
General Comments 0
You need to be logged in to leave comments. Login now