##// END OF EJS Templates
branchmap: drop branchcache.setdefault() (API)...
Pulkit Goyal -
r42170:7546bf46 default
parent child Browse files
Show More
@@ -1,619 +1,616 b''
1 1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11
12 12 from .node import (
13 13 bin,
14 14 hex,
15 15 nullid,
16 16 nullrev,
17 17 )
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 pycompat,
22 22 scmutil,
23 23 util,
24 24 )
25 25 from .utils import (
26 26 stringutil,
27 27 )
28 28
29 29 calcsize = struct.calcsize
30 30 pack_into = struct.pack_into
31 31 unpack_from = struct.unpack_from
32 32
33 33
### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# The ordering may be partial.
#
# Maps a filter name to the name of its nearest subset filter; the cache for
# the subset can be reused and extended when no cache exists for the filter
# itself (see BranchMapCache.updatecache).  The key None denotes the
# unfiltered repository view.
subsettable = {None: 'visible',
               'visible-hidden': 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}
45 45
46 46
class BranchMapCache(object):
    """mapping of filtered views of repo with their branchcache"""
    def __init__(self):
        # filter name (None for the unfiltered view) -> branchcache instance
        self._per_filter = {}

    def __getitem__(self, repo):
        """Return an up-to-date branchcache for *repo*'s filtered view."""
        self.updatecache(repo)
        return self._per_filter[repo.filtername]

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # This can trigger updates for the caches for subsets of the filtered
        # view, e.g. when there is no cache for this filtered view or the cache
        # is stale.

        cl = repo.changelog
        filtername = repo.filtername
        bcache = self._per_filter.get(filtername)
        if bcache is None or not bcache.validfor(repo):
            # cache object missing or cache object stale? Read from disk
            bcache = branchcache.fromfile(repo)

        revs = []
        if bcache is None:
            # no (fresh) cache available anymore, perhaps we can re-use
            # the cache for a subset, then extend that to add info on missing
            # revisions.
            subsetname = subsettable.get(filtername)
            if subsetname is not None:
                subset = repo.filtered(subsetname)
                # self[subset] recurses into updatecache() for the subset view,
                # so the copy we take here is itself up to date.
                bcache = self[subset].copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
            else:
                # nothing to fall back on, start empty.
                bcache = branchcache()

        # revisions newer than the cached tip still need to be scanned
        revs.extend(cl.revs(start=bcache.tiprev + 1))
        if revs:
            bcache.update(repo, revs)

        assert bcache.validfor(repo), filtername
        self._per_filter[repo.filtername] = bcache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = []
        for bheads in remotebranchmap.itervalues():
            rbheads += bheads
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                if c:
                    # head h closes its branch
                    closed.append(h)

        if rbheads:
            rtiprev = max((int(clrev(node)) for node in rbheads))
            cache = branchcache(
                remotebranchmap, repo[rtiprev].node(), rtiprev,
                closednodes=closed)

            # Try to stick it as low as possible
            # filters above served are unlikely to be fetched from a clone
            for candidate in ('base', 'immutable', 'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    # store and persist for the lowest valid filter only
                    self._per_filter[candidate] = cache
                    cache.write(rview)
                    return

    def clear(self):
        """Drop all in-memory caches (on-disk cache files are untouched)."""
        self._per_filter.clear()
128 128
129 129
class branchcache(object):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None):
        # tipnode/tiprev: node and revision number forming the cache key
        self.tipnode = tipnode
        self.tiprev = tiprev
        # hash of the filtered revisions, or None for an unfiltered view
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        # branch name -> list of head nodes
        self.entries = dict(entries)

    def __iter__(self):
        # iterate over branch names, like a dict
        return iter(self.entries)

    def __setitem__(self, key, value):
        self.entries[key] = value

    def __getitem__(self, key):
        return self.entries[key]

    def iteritems(self):
        return self.entries.iteritems()

    @classmethod
    def fromfile(cls, repo):
        """Read the on-disk cache for *repo* and return a branchcache.

        Returns None when the file is missing/unreadable or its content is
        invalid or stale for this repo view.
        """
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            cachekey = next(lineiter).rstrip('\n').split(" ", 2)
            last, lrev = cachekey[:2]
            last, lrev = bin(last), int(lrev)
            filteredhash = None
            if len(cachekey) > 2:
                filteredhash = bin(cachekey[2])
            bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash)
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError(r'tip differs')
            bcache.load(repo, lineiter)
        except (IOError, OSError):
            # no cache file (or unreadable): caller falls back to recompute
            return None

        except Exception as inst:
            # any parse/validation failure discards the cache; only report
            # details when debugging
            if repo.ui.debugflag:
                msg = 'invalid branchheads cache'
                if repo.filtername is not None:
                    msg += ' (%s)' % repo.filtername
                msg += ': %s\n'
                repo.ui.debug(msg % pycompat.bytestr(inst))
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    def load(self, repo, lineiter):
        """ fully loads the branchcache by reading from the file using the line
        iterator passed"""
        cl = repo.changelog
        for line in lineiter:
            line = line.rstrip('\n')
            if not line:
                continue
            node, state, label = line.split(" ", 2)
            if state not in 'oc':
                raise ValueError(r'invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            if not cl.hasnode(node):
                # stale entry pointing at a stripped/unknown node
                raise ValueError(
                    r'node %s does not exist' % pycompat.sysstr(hex(node)))
            self.entries.setdefault(label, []).append(node)
            if state == 'c':
                self._closednodes.add(node)

    @staticmethod
    def _filename(repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = "branch2"
        if repo.filtername:
            # one cache file per filtered view, e.g. "branch2-served"
            filename = '%s-%s' % (filename, repo.filtername)
        return filename

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash ==
                         scmutil.filteredhash(repo, self.tiprev)))
        except IndexError:
            # tiprev no longer exists (e.g. after a strip)
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        # filter *nodes* down to those that do not close their branch
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        """Return the heads of *branch*; include closed heads only if asked."""
        heads = self[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        # yields (branchname, heads, tipnode, isclosed) tuples
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """ returns all the heads """
        return self.entries.itervalues()

    def copy(self):
        """return a deep copy of the branchcache object"""
        # NOTE(review): the constructor copies the entries dict but the head
        # lists (and _closednodes set) are shared with the original — this
        # looks shallower than "deep copy" suggests; confirm callers never
        # mutate head lists in place.
        return branchcache(
            self.entries, self.tipnode, self.tiprev, self.filteredhash,
            self._closednodes)

    def write(self, repo):
        """Serialize the cache to disk (best effort; failures are logged)."""
        try:
            f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
            # first line is the cache key: tip node, tip rev, optional hash
            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self.iteritems()):
                label = encoding.fromlocal(label)
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state, label))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self.entries), nodecount)
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug("couldn't write branch cache: %s\n" %
                          stringutil.forcebytestr(inst))

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self.entries.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This have been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            # advance the cache key if this branch moved the tip forward
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # cache key is not valid anymore: recompute the tip from the
            # recorded heads instead
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.iterheads():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername or b'None', duration)

        # persist the refreshed cache (no-op for remotebranchcache)
        self.write(repo)
386 383
class remotebranchcache(branchcache):
    """Branchmap info for a remote connection, should not write locally"""
    def write(self, repo):
        # intentionally a no-op: data derived from a remote must not
        # overwrite the local on-disk branch cache
        pass
391 388
392 389
# Revision branch info cache

# bump the version suffix to invalidate old cache files on format changes
_rbcversion = '-v1'
_rbcnames = 'rbc-names' + _rbcversion
_rbcrevs = 'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
_rbcrecfmt = '>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
_rbcnodelen = 4
# low 31 bits of the second field: index into the branch-name list
_rbcbranchidxmask = 0x7fffffff
# high bit of the second field: set when the revision closes its branch
_rbccloseflag = 0x80000000
404 401
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs fill thus still give the right result
    while converging towards full recovery ... assuming no incorrectly matching
    node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        # only meaningful for the unfiltered view: revision numbers must be
        # stable indices into rbc-revs
        assert repo.filtername is None
        self._repo = repo
        self._names = [] # branch names in local encoding with static index
        self._rbcrevs = bytearray()
        self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata) # for verification before writing
            if bndata:
                self._names = [encoding.tolocal(bn)
                               for bn in bndata.split('\0')]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              stringutil.forcebytestr(inst))
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names) # number of names read at
                                               # _rbcsnameslen

    def _clear(self):
        """Reset in-memory state so the cache is rebuilt from scratch."""
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
        util.clearcachedproperty(self, '_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        # branch name -> index into self._names (lazily built, cleared by
        # _clear via clearcachedproperty)
        return dict((b, r) for r, b in enumerate(self._names))

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == '\0\0\0\0':
            # record was never written for this rev; fall through to slow path
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug("referenced branch names not found"
                    " - rebuilding revision branch cache from scratch\n")
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug("history modification detected - truncating "
                "revision branch cache to revision %d\n" % rev)
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first time we see this branch name: append and index it
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, branch, rev, node, close):
        """add new data information to the cache"""
        if branch in self._namesreverse:
            branchidx = self._namesreverse[branch]
        else:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, node, branchidx)
        # If no cache data were readable (non exists, bad permission, etc)
        # the cache was bypassing itself by setting:
        #
        #   self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if r'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            # nullrev has no record slot (see branchinfo fast path)
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            # grow the buffer with zero records up to the changelog length
            self._rbcrevs.extend('\0' *
                                 (len(self._repo.changelog) * _rbcrecsize -
                                  len(self._rbcrevs)))
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        # mark everything from this rev on as dirty (needs rewriting on disk)
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize('write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = ''
        try:
            # step 1: append (or rewrite) new branch names to rbc-names
            if self._rbcnamescount < len(self._names):
                step = ' names'
                wlock = repo.wlock(wait=False)
                if self._rbcnamescount != 0:
                    f = repo.cachevfs.open(_rbcnames, 'ab')
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        # on-disk file diverged from what we read: rewrite all
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    # before rewriting names, make sure references are removed
                    repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                    f = repo.cachevfs.open(_rbcnames, 'wb')
                f.write('\0'.join(encoding.fromlocal(b)
                                  for b in self._names[self._rbcnamescount:]))
                self._rbcsnameslen = f.tell()
                f.close()
                self._rbcnamescount = len(self._names)

            # step 2: write dirty revision records to rbc-revs
            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = ''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                revs = min(len(repo.changelog),
                           len(self._rbcrevs) // _rbcrecsize)
                f = repo.cachevfs.open(_rbcrevs, 'ab')
                if f.tell() != start:
                    repo.ui.debug("truncating cache/%s to %d\n"
                                  % (_rbcrevs, start))
                    f.seek(start)
                    if f.tell() != start:
                        # seek failed to land where expected: rewrite fully
                        start = 0
                        f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
                self._rbcrevslen = revs
        except (IOError, OSError, error.Abort, error.LockError) as inst:
            # cache writes are best effort: log and continue on failure
            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                          % (step, stringutil.forcebytestr(inst)))
        finally:
            if wlock is not None:
                wlock.release()
General Comments 0
You need to be logged in to leave comments. Login now