py3: use util.forcebytestr to convert error messages to bytes...
Pulkit Goyal
r36414:743b293c default
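This revision replaces bare '%s' interpolation of exception objects with util.forcebytestr(inst) in two debug messages (one in branchmap.py, one in tags.py, both visible in the diff below). On Python 3 a bytes format string cannot interpolate an exception object directly, so the message has to be coerced to bytes first. A minimal sketch of a forcebytestr-style helper — an illustration of the idea, not Mercurial's exact implementation:

```python
def forcebytestr(obj):
    """Portably turn an arbitrary object (e.g. an exception) into bytes."""
    try:
        return str(obj).encode('ascii')
    except UnicodeEncodeError:
        # non-ascii message: fall back to a lossy but safe encoding
        return str(obj).encode('utf-8', 'replace')

try:
    raise OSError("no such file or directory")
except OSError as inst:
    # b"%s" % inst raises TypeError on Python 3; coerce to bytes first
    msg = b"couldn't write branch cache: %s\n" % forcebytestr(inst)
    assert msg == b"couldn't write branch cache: no such file or directory\n"
```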
@@ -1,525 +1,526 b''
1 1 # branchmap.py - logic to compute, maintain and store the branchmap for a local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11
12 12 from .node import (
13 13 bin,
14 14 hex,
15 15 nullid,
16 16 nullrev,
17 17 )
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 pycompat,
22 22 scmutil,
23 23 util,
24 24 )
25 25
26 26 calcsize = struct.calcsize
27 27 pack_into = struct.pack_into
28 28 unpack_from = struct.unpack_from
29 29
30 30 def _filename(repo):
31 31 """name of a branchcache file for a given repo or repoview"""
32 32 filename = "branch2"
33 33 if repo.filtername:
34 34 filename = '%s-%s' % (filename, repo.filtername)
35 35 return filename
36 36
37 37 def read(repo):
38 38 try:
39 39 f = repo.cachevfs(_filename(repo))
40 40 lines = f.read().split('\n')
41 41 f.close()
42 42 except (IOError, OSError):
43 43 return None
44 44
45 45 try:
46 46 cachekey = lines.pop(0).split(" ", 2)
47 47 last, lrev = cachekey[:2]
48 48 last, lrev = bin(last), int(lrev)
49 49 filteredhash = None
50 50 if len(cachekey) > 2:
51 51 filteredhash = bin(cachekey[2])
52 52 partial = branchcache(tipnode=last, tiprev=lrev,
53 53 filteredhash=filteredhash)
54 54 if not partial.validfor(repo):
55 55 # invalidate the cache
56 56 raise ValueError(r'tip differs')
57 57 cl = repo.changelog
58 58 for l in lines:
59 59 if not l:
60 60 continue
61 61 node, state, label = l.split(" ", 2)
62 62 if state not in 'oc':
63 63 raise ValueError(r'invalid branch state')
64 64 label = encoding.tolocal(label.strip())
65 65 node = bin(node)
66 66 if not cl.hasnode(node):
67 67 raise ValueError(
68 68 r'node %s does not exist' % pycompat.sysstr(hex(node)))
69 69 partial.setdefault(label, []).append(node)
70 70 if state == 'c':
71 71 partial._closednodes.add(node)
72 72 except Exception as inst:
73 73 if repo.ui.debugflag:
74 74 msg = 'invalid branchheads cache'
75 75 if repo.filtername is not None:
76 76 msg += ' (%s)' % repo.filtername
77 77 msg += ': %s\n'
78 78 repo.ui.debug(msg % pycompat.bytestr(inst))
79 79 partial = None
80 80 return partial
81 81
82 82 ### Nearest subset relation
83 83 # Nearest subset of filter X is a filter Y so that:
84 84 # * Y is included in X,
85 85 # * X - Y is as small as possible.
86 86 # This creates an ordering used for branchmap purposes.
87 87 # The ordering may be partial.
88 88 subsettable = {None: 'visible',
89 89 'visible-hidden': 'visible',
90 90 'visible': 'served',
91 91 'served': 'immutable',
92 92 'immutable': 'base'}
93 93
94 94 def updatecache(repo):
95 95 cl = repo.changelog
96 96 filtername = repo.filtername
97 97 partial = repo._branchcaches.get(filtername)
98 98
99 99 revs = []
100 100 if partial is None or not partial.validfor(repo):
101 101 partial = read(repo)
102 102 if partial is None:
103 103 subsetname = subsettable.get(filtername)
104 104 if subsetname is None:
105 105 partial = branchcache()
106 106 else:
107 107 subset = repo.filtered(subsetname)
108 108 partial = subset.branchmap().copy()
109 109 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
110 110 revs.extend(r for r in extrarevs if r <= partial.tiprev)
111 111 revs.extend(cl.revs(start=partial.tiprev + 1))
112 112 if revs:
113 113 partial.update(repo, revs)
114 114 partial.write(repo)
115 115
116 116 assert partial.validfor(repo), filtername
117 117 repo._branchcaches[repo.filtername] = partial
118 118
119 119 def replacecache(repo, bm):
120 120 """Replace the branchmap cache for a repo with a branch mapping.
121 121
122 122 This is likely only called during clone with a branch map from a remote.
123 123 """
124 124 rbheads = []
125 125 closed = []
126 126 for bheads in bm.itervalues():
127 127 rbheads.extend(bheads)
128 128 for h in bheads:
129 129 r = repo.changelog.rev(h)
130 130 b, c = repo.changelog.branchinfo(r)
131 131 if c:
132 132 closed.append(h)
133 133
134 134 if rbheads:
135 135 rtiprev = max((int(repo.changelog.rev(node))
136 136 for node in rbheads))
137 137 cache = branchcache(bm,
138 138 repo[rtiprev].node(),
139 139 rtiprev,
140 140 closednodes=closed)
141 141
142 142 # Try to stick it as low as possible
143 143 # filters above served are unlikely to be fetched from a clone
144 144 for candidate in ('base', 'immutable', 'served'):
145 145 rview = repo.filtered(candidate)
146 146 if cache.validfor(rview):
147 147 repo._branchcaches[candidate] = cache
148 148 cache.write(rview)
149 149 break
150 150
151 151 class branchcache(dict):
152 152 """A dict like object that hold branches heads cache.
153 153
154 154 This cache is used to avoid costly computations to determine all the
155 155 branch heads of a repo.
156 156
157 157 The cache is serialized on disk in the following format:
158 158
159 159 <tip hex node> <tip rev number> [optional filtered repo hex hash]
160 160 <branch head hex node> <open/closed state> <branch name>
161 161 <branch head hex node> <open/closed state> <branch name>
162 162 ...
163 163
164 164 The first line is used to check if the cache is still valid. If the
165 165 branch cache is for a filtered repo view, an optional third hash is
166 166 included that hashes the hashes of all filtered revisions.
167 167
168 168 The open/closed state is represented by a single letter 'o' or 'c'.
169 169 This field can be used to avoid changelog reads when determining if a
170 170 branch head closes a branch or not.
171 171 """
172 172
173 173 def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
174 174 filteredhash=None, closednodes=None):
175 175 super(branchcache, self).__init__(entries)
176 176 self.tipnode = tipnode
177 177 self.tiprev = tiprev
178 178 self.filteredhash = filteredhash
179 179 # closednodes is a set of nodes that close their branch. If the branch
180 180 # cache has been updated, it may contain nodes that are no longer
181 181 # heads.
182 182 if closednodes is None:
183 183 self._closednodes = set()
184 184 else:
185 185 self._closednodes = closednodes
186 186
187 187 def validfor(self, repo):
188 188 """Is the cache content valid regarding a repo
189 189
190 190 - False when cached tipnode is unknown or if we detect a strip.
191 191 - True when cache is up to date or a subset of current repo."""
192 192 try:
193 193 return ((self.tipnode == repo.changelog.node(self.tiprev))
194 194 and (self.filteredhash == \
195 195 scmutil.filteredhash(repo, self.tiprev)))
196 196 except IndexError:
197 197 return False
198 198
199 199 def _branchtip(self, heads):
200 200 '''Return a tuple with the last open head in heads and False;
201 201 otherwise, return the last closed head and True.'''
202 202 tip = heads[-1]
203 203 closed = True
204 204 for h in reversed(heads):
205 205 if h not in self._closednodes:
206 206 tip = h
207 207 closed = False
208 208 break
209 209 return tip, closed
210 210
211 211 def branchtip(self, branch):
212 212 '''Return the tipmost open head on branch, otherwise return the
213 213 tipmost closed head on branch.
214 214 Raise KeyError for unknown branch.'''
215 215 return self._branchtip(self[branch])[0]
216 216
217 217 def iteropen(self, nodes):
218 218 return (n for n in nodes if n not in self._closednodes)
219 219
220 220 def branchheads(self, branch, closed=False):
221 221 heads = self[branch]
222 222 if not closed:
223 223 heads = list(self.iteropen(heads))
224 224 return heads
225 225
226 226 def iterbranches(self):
227 227 for bn, heads in self.iteritems():
228 228 yield (bn, heads) + self._branchtip(heads)
229 229
230 230 def copy(self):
231 231 """return an deep copy of the branchcache object"""
232 232 return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
233 233 self._closednodes)
234 234
235 235 def write(self, repo):
236 236 try:
237 237 f = repo.cachevfs(_filename(repo), "w", atomictemp=True)
238 238 cachekey = [hex(self.tipnode), '%d' % self.tiprev]
239 239 if self.filteredhash is not None:
240 240 cachekey.append(hex(self.filteredhash))
241 241 f.write(" ".join(cachekey) + '\n')
242 242 nodecount = 0
243 243 for label, nodes in sorted(self.iteritems()):
244 244 for node in nodes:
245 245 nodecount += 1
246 246 if node in self._closednodes:
247 247 state = 'c'
248 248 else:
249 249 state = 'o'
250 250 f.write("%s %s %s\n" % (hex(node), state,
251 251 encoding.fromlocal(label)))
252 252 f.close()
253 253 repo.ui.log('branchcache',
254 254 'wrote %s branch cache with %d labels and %d nodes\n',
255 255 repo.filtername, len(self), nodecount)
256 256 except (IOError, OSError, error.Abort) as inst:
257 257 # Abort may be raised by read only opener, so log and continue
258 repo.ui.debug("couldn't write branch cache: %s\n" % inst)
258 repo.ui.debug("couldn't write branch cache: %s\n" %
259 util.forcebytestr(inst))
259 260
260 261 def update(self, repo, revgen):
261 262 """Given a branchhead cache, self, that may have extra nodes or be
262 263 missing heads, and a generator of nodes that is a strict superset of
263 264 the missing heads, this function updates self to be correct.
264 265 """
265 266 starttime = util.timer()
266 267 cl = repo.changelog
267 268 # collect new branch entries
268 269 newbranches = {}
269 270 getbranchinfo = repo.revbranchcache().branchinfo
270 271 for r in revgen:
271 272 branch, closesbranch = getbranchinfo(r)
272 273 newbranches.setdefault(branch, []).append(r)
273 274 if closesbranch:
274 275 self._closednodes.add(cl.node(r))
275 276
276 277 # fetch current topological heads to speed up filtering
277 278 topoheads = set(cl.headrevs())
278 279
279 280 # if older branchheads are reachable from new ones, they aren't
280 281 # really branchheads. Note checking parents is insufficient:
281 282 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
282 283 for branch, newheadrevs in newbranches.iteritems():
283 284 bheads = self.setdefault(branch, [])
284 285 bheadset = set(cl.rev(node) for node in bheads)
285 286
286 287 # This has been tested True on all internal usages of this function.
287 288 # Run it again in case of doubt.
288 289 # assert not (set(bheadrevs) & set(newheadrevs))
289 290 newheadrevs.sort()
290 291 bheadset.update(newheadrevs)
291 292
292 293 # This prunes out two kinds of heads - heads that are superseded by
293 294 # a head in newheadrevs, and newheadrevs that are not heads because
294 295 # an existing head is their descendant.
295 296 uncertain = bheadset - topoheads
296 297 if uncertain:
297 298 floorrev = min(uncertain)
298 299 ancestors = set(cl.ancestors(newheadrevs, floorrev))
299 300 bheadset -= ancestors
300 301 bheadrevs = sorted(bheadset)
301 302 self[branch] = [cl.node(rev) for rev in bheadrevs]
302 303 tiprev = bheadrevs[-1]
303 304 if tiprev > self.tiprev:
304 305 self.tipnode = cl.node(tiprev)
305 306 self.tiprev = tiprev
306 307
307 308 if not self.validfor(repo):
308 309 # cache keys are not valid anymore
309 310 self.tipnode = nullid
310 311 self.tiprev = nullrev
311 312 for heads in self.values():
312 313 tiprev = max(cl.rev(node) for node in heads)
313 314 if tiprev > self.tiprev:
314 315 self.tipnode = cl.node(tiprev)
315 316 self.tiprev = tiprev
316 317 self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
317 318
318 319 duration = util.timer() - starttime
319 320 repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
320 321 repo.filtername, duration)
321 322
322 323 # Revision branch info cache
323 324
324 325 _rbcversion = '-v1'
325 326 _rbcnames = 'rbc-names' + _rbcversion
326 327 _rbcrevs = 'rbc-revs' + _rbcversion
327 328 # [4 byte hash prefix][4 byte branch name number with sign bit indicating closed]
328 329 _rbcrecfmt = '>4sI'
329 330 _rbcrecsize = calcsize(_rbcrecfmt)
330 331 _rbcnodelen = 4
331 332 _rbcbranchidxmask = 0x7fffffff
332 333 _rbccloseflag = 0x80000000
333 334
334 335 class revbranchcache(object):
335 336 """Persistent cache, mapping from revision number to branch name and close.
336 337 This is a low level cache, independent of filtering.
337 338
338 339 Branch names are stored in rbc-names in internal encoding separated by 0.
339 340 rbc-names is append-only, and each branch name is only stored once and will
340 341 thus have a unique index.
341 342
342 343 The branch info for each revision is stored in rbc-revs as constant size
343 344 records. The whole file is read into memory, but it is only 'parsed' on
344 345 demand. The file is usually append-only but will be truncated if repo
345 346 modification is detected.
346 347 The record for each revision contains the first 4 bytes of the
347 348 corresponding node hash, and the record is only used if it still matches.
348 349 Even a completely trashed rbc-revs file will thus still give the right result
349 350 while converging towards full recovery ... assuming no incorrectly matching
350 351 node hashes.
351 352 The record also contains 4 bytes where 31 bits contain the index of the
352 353 branch and the last bit indicates that it is a branch close commit.
353 354 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
354 355 and will grow with it but be 1/8th of its size.
355 356 """
356 357
357 358 def __init__(self, repo, readonly=True):
358 359 assert repo.filtername is None
359 360 self._repo = repo
360 361 self._names = [] # branch names in local encoding with static index
361 362 self._rbcrevs = bytearray()
362 363 self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
363 364 try:
364 365 bndata = repo.cachevfs.read(_rbcnames)
365 366 self._rbcsnameslen = len(bndata) # for verification before writing
366 367 if bndata:
367 368 self._names = [encoding.tolocal(bn)
368 369 for bn in bndata.split('\0')]
369 370 except (IOError, OSError):
370 371 if readonly:
371 372 # don't try to use cache - fall back to the slow path
372 373 self.branchinfo = self._branchinfo
373 374
374 375 if self._names:
375 376 try:
376 377 data = repo.cachevfs.read(_rbcrevs)
377 378 self._rbcrevs[:] = data
378 379 except (IOError, OSError) as inst:
379 380 repo.ui.debug("couldn't read revision branch cache: %s\n" %
380 381 util.forcebytestr(inst))
381 382 # remember number of good records on disk
382 383 self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
383 384 len(repo.changelog))
384 385 if self._rbcrevslen == 0:
385 386 self._names = []
386 387 self._rbcnamescount = len(self._names) # number of names read at
387 388 # _rbcsnameslen
388 389 self._namesreverse = dict((b, r) for r, b in enumerate(self._names))
389 390
390 391 def _clear(self):
391 392 self._rbcsnameslen = 0
392 393 del self._names[:]
393 394 self._rbcnamescount = 0
394 395 self._namesreverse.clear()
395 396 self._rbcrevslen = len(self._repo.changelog)
396 397 self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
397 398
398 399 def branchinfo(self, rev):
399 400 """Return branch name and close flag for rev, using and updating
400 401 persistent cache."""
401 402 changelog = self._repo.changelog
402 403 rbcrevidx = rev * _rbcrecsize
403 404
404 405 # avoid negative index, changelog.read(nullrev) is fast without cache
405 406 if rev == nullrev:
406 407 return changelog.branchinfo(rev)
407 408
408 409 # if requested rev isn't allocated, grow and cache the rev info
409 410 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
410 411 return self._branchinfo(rev)
411 412
412 413 # fast path: extract data from cache, use it if node is matching
413 414 reponode = changelog.node(rev)[:_rbcnodelen]
414 415 cachenode, branchidx = unpack_from(
415 416 _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
416 417 close = bool(branchidx & _rbccloseflag)
417 418 if close:
418 419 branchidx &= _rbcbranchidxmask
419 420 if cachenode == '\0\0\0\0':
420 421 pass
421 422 elif cachenode == reponode:
422 423 try:
423 424 return self._names[branchidx], close
424 425 except IndexError:
425 426 # recover from invalid reference to unknown branch
426 427 self._repo.ui.debug("referenced branch names not found"
427 428 " - rebuilding revision branch cache from scratch\n")
428 429 self._clear()
429 430 else:
430 431 # rev/node map has changed, invalidate the cache from here up
431 432 self._repo.ui.debug("history modification detected - truncating "
432 433 "revision branch cache to revision %d\n" % rev)
433 434 truncate = rbcrevidx + _rbcrecsize
434 435 del self._rbcrevs[truncate:]
435 436 self._rbcrevslen = min(self._rbcrevslen, truncate)
436 437
437 438 # fall back to slow path and make sure it will be written to disk
438 439 return self._branchinfo(rev)
439 440
440 441 def _branchinfo(self, rev):
441 442 """Retrieve branch info from changelog and update _rbcrevs"""
442 443 changelog = self._repo.changelog
443 444 b, close = changelog.branchinfo(rev)
444 445 if b in self._namesreverse:
445 446 branchidx = self._namesreverse[b]
446 447 else:
447 448 branchidx = len(self._names)
448 449 self._names.append(b)
449 450 self._namesreverse[b] = branchidx
450 451 reponode = changelog.node(rev)
451 452 if close:
452 453 branchidx |= _rbccloseflag
453 454 self._setcachedata(rev, reponode, branchidx)
454 455 return b, close
455 456
456 457 def _setcachedata(self, rev, node, branchidx):
457 458 """Writes the node's branch data to the in-memory cache data."""
458 459 if rev == nullrev:
459 460 return
460 461 rbcrevidx = rev * _rbcrecsize
461 462 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
462 463 self._rbcrevs.extend('\0' *
463 464 (len(self._repo.changelog) * _rbcrecsize -
464 465 len(self._rbcrevs)))
465 466 pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
466 467 self._rbcrevslen = min(self._rbcrevslen, rev)
467 468
468 469 tr = self._repo.currenttransaction()
469 470 if tr:
470 471 tr.addfinalize('write-revbranchcache', self.write)
471 472
472 473 def write(self, tr=None):
473 474 """Save branch cache if it is dirty."""
474 475 repo = self._repo
475 476 wlock = None
476 477 step = ''
477 478 try:
478 479 if self._rbcnamescount < len(self._names):
479 480 step = ' names'
480 481 wlock = repo.wlock(wait=False)
481 482 if self._rbcnamescount != 0:
482 483 f = repo.cachevfs.open(_rbcnames, 'ab')
483 484 if f.tell() == self._rbcsnameslen:
484 485 f.write('\0')
485 486 else:
486 487 f.close()
487 488 repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
488 489 self._rbcnamescount = 0
489 490 self._rbcrevslen = 0
490 491 if self._rbcnamescount == 0:
491 492 # before rewriting names, make sure references are removed
492 493 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
493 494 f = repo.cachevfs.open(_rbcnames, 'wb')
494 495 f.write('\0'.join(encoding.fromlocal(b)
495 496 for b in self._names[self._rbcnamescount:]))
496 497 self._rbcsnameslen = f.tell()
497 498 f.close()
498 499 self._rbcnamescount = len(self._names)
499 500
500 501 start = self._rbcrevslen * _rbcrecsize
501 502 if start != len(self._rbcrevs):
502 503 step = ''
503 504 if wlock is None:
504 505 wlock = repo.wlock(wait=False)
505 506 revs = min(len(repo.changelog),
506 507 len(self._rbcrevs) // _rbcrecsize)
507 508 f = repo.cachevfs.open(_rbcrevs, 'ab')
508 509 if f.tell() != start:
509 510 repo.ui.debug("truncating cache/%s to %d\n"
510 511 % (_rbcrevs, start))
511 512 f.seek(start)
512 513 if f.tell() != start:
513 514 start = 0
514 515 f.seek(start)
515 516 f.truncate()
516 517 end = revs * _rbcrecsize
517 518 f.write(self._rbcrevs[start:end])
518 519 f.close()
519 520 self._rbcrevslen = revs
520 521 except (IOError, OSError, error.Abort, error.LockError) as inst:
521 522 repo.ui.debug("couldn't write revision branch cache%s: %s\n"
522 523 % (step, inst))
523 524 finally:
524 525 if wlock is not None:
525 526 wlock.release()
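Aside on the binary format above: each rbc-revs record packs a 4-byte node-hash prefix and a 32-bit branch-name index into 8 bytes ('>4sI'), with the top bit of the index flagging a branch-closing commit. A small round-trip sketch using the constants from the code and invented values:

```python
import struct

_rbcrecfmt = '>4sI'
_rbcbranchidxmask = 0x7fffffff
_rbccloseflag = 0x80000000

# pack a record for branch index 5, marked as closing its branch
record = struct.pack(_rbcrecfmt, b'\xde\xad\xbe\xef', 5 | _rbccloseflag)
assert len(record) == struct.calcsize(_rbcrecfmt)  # 8 bytes per revision

# unpack it again, as branchinfo() does on its fast path
prefix, branchidx = struct.unpack(_rbcrecfmt, record)
close = bool(branchidx & _rbccloseflag)
branchidx &= _rbcbranchidxmask
assert (prefix, branchidx, close) == (b'\xde\xad\xbe\xef', 5, True)
```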
@@ -1,788 +1,788 b''
1 1 # tags.py - read tag info from local repository
2 2 #
3 3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2009 Greg Ward <greg@gerg.ca>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 # Currently this module only deals with reading and caching tags.
10 10 # Eventually, it could take care of updating (adding/removing/moving)
11 11 # tags too.
12 12
13 13 from __future__ import absolute_import
14 14
15 15 import errno
16 16
17 17 from .node import (
18 18 bin,
19 19 hex,
20 20 nullid,
21 21 short,
22 22 )
23 23 from .i18n import _
24 24 from . import (
25 25 encoding,
26 26 error,
27 27 match as matchmod,
28 28 scmutil,
29 29 util,
30 30 )
31 31
32 32 # Tags computation can be expensive and caches exist to make it fast in
33 33 # the common case.
34 34 #
35 35 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
36 36 # each revision in the repository. The file is effectively an array of
37 37 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
38 38 # details.
39 39 #
40 40 # The .hgtags filenode cache grows in proportion to the length of the
41 41 # changelog. The file is truncated when the changelog is stripped.
42 42 #
43 43 # The purpose of the filenode cache is to avoid the most expensive part
44 44 # of finding global tags, which is looking up the .hgtags filenode in the
45 45 # manifest for each head. This can take dozens of milliseconds or over 100ms for
46 46 # repositories with very large manifests. Multiplied by dozens or even
47 47 # hundreds of heads and there is a significant performance concern.
48 48 #
49 49 # There also exists a separate cache file for each repository filter.
50 50 # These "tags-*" files store information about the history of tags.
51 51 #
52 52 # The tags cache files consist of a cache validation line followed by
53 53 # a history of tags.
54 54 #
55 55 # The cache validation line has the format:
56 56 #
57 57 # <tiprev> <tipnode> [<filteredhash>]
58 58 #
59 59 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
60 60 # node for that changeset. These redundantly identify the repository
61 61 # tip from the time the cache was written. In addition, <filteredhash>,
62 62 # if present, is a 40 character hex hash of the contents of the filtered
63 63 # revisions for this filter. If the set of filtered revs changes, the
64 64 # hash will change and invalidate the cache.
65 65 #
66 66 # The history part of the tags cache consists of lines of the form:
67 67 #
68 68 # <node> <tag>
69 69 #
70 70 # (This format is identical to that of .hgtags files.)
71 71 #
72 72 # <tag> is the tag name and <node> is the 40 character hex changeset
73 73 # the tag is associated with.
74 74 #
75 75 # Tags are written sorted by tag name.
76 76 #
77 77 # Tags associated with multiple changesets have an entry for each changeset.
78 78 # The most recent changeset (in terms of revlog ordering for the head
79 79 # setting it) for each tag is last.
80 80
81 81 def fnoderevs(ui, repo, revs):
82 82 """return the list of '.hgtags' fnodes used in a set revisions
83 83
84 84 This is returned as list of unique fnodes. We use a list instead of a set
85 85 because order matters when it comes to tags."""
86 86 unfi = repo.unfiltered()
87 87 tonode = unfi.changelog.node
88 88 nodes = [tonode(r) for r in revs]
89 89 fnodes = _getfnodes(ui, repo, nodes[::-1]) # reversed helps the cache
90 90 fnodes = _filterfnodes(fnodes, nodes)
91 91 return fnodes
92 92
93 93 def _nulltonone(value):
94 94 """convert nullid to None
95 95
96 96 For tag value, nullid means "deleted". This small utility function helps
97 97 translating that to None."""
98 98 if value == nullid:
99 99 return None
100 100 return value
101 101
102 102 def difftags(ui, repo, oldfnodes, newfnodes):
103 103 """list differences between tags expressed in two set of file-nodes
104 104
105 105 The list contains entries in the form: (tagname, oldvalue, newvalue).
106 106 None is used to express a missing value:
107 107 ('foo', None, 'abcd') is a new tag,
108 108 ('bar', 'ef01', None) is a deletion,
109 109 ('baz', 'abcd', 'ef01') is a tag movement.
110 110 """
111 111 if oldfnodes == newfnodes:
112 112 return []
113 113 oldtags = _tagsfromfnodes(ui, repo, oldfnodes)
114 114 newtags = _tagsfromfnodes(ui, repo, newfnodes)
115 115
116 116 # list of (tag, old, new): None means missing
117 117 entries = []
118 118 for tag, (new, __) in newtags.items():
119 119 new = _nulltonone(new)
120 120 old, __ = oldtags.pop(tag, (None, None))
121 121 old = _nulltonone(old)
122 122 if old != new:
123 123 entries.append((tag, old, new))
124 124 # handle deleted tags
125 125 for tag, (old, __) in oldtags.items():
126 126 old = _nulltonone(old)
127 127 if old is not None:
128 128 entries.append((tag, old, None))
129 129 entries.sort()
130 130 return entries
131 131
132 132 def writediff(fp, difflist):
133 133 """write tags diff information to a file.
134 134
135 135 Data are stored in a line-based format:
136 136
137 137 <action> <hex-node> <tag-name>\n
138 138
139 139 Actions are defined as follows:
140 140 -R tag is removed,
141 141 +A tag is added,
142 142 -M tag is moved (old value),
143 143 +M tag is moved (new value),
144 144
145 145 Example:
146 146
147 147 +A 875517b4806a848f942811a315a5bce30804ae85 t5
148 148
149 149 See documentation of difftags output for details about the input.
150 150 """
151 151 add = '+A %s %s\n'
152 152 remove = '-R %s %s\n'
153 153 updateold = '-M %s %s\n'
154 154 updatenew = '+M %s %s\n'
155 155 for tag, old, new in difflist:
156 156 # translate to hex
157 157 if old is not None:
158 158 old = hex(old)
159 159 if new is not None:
160 160 new = hex(new)
161 161 # write to file
162 162 if old is None:
163 163 fp.write(add % (new, tag))
164 164 elif new is None:
165 165 fp.write(remove % (old, tag))
166 166 else:
167 167 fp.write(updateold % (old, tag))
168 168 fp.write(updatenew % (new, tag))
169 169
170 170 def findglobaltags(ui, repo):
171 171 '''Find global tags in a repo: return a tagsmap
172 172
173 173 tagsmap: tag name to (node, hist) 2-tuples.
174 174
175 175 The tags cache is read and updated as a side-effect of calling.
176 176 '''
177 177 (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
178 178 if cachetags is not None:
179 179 assert not shouldwrite
180 180 # XXX is this really 100% correct? are there oddball special
181 181 # cases where a global tag should outrank a local tag but won't,
182 182 # because cachetags does not contain rank info?
183 183 alltags = {}
184 184 _updatetags(cachetags, alltags)
185 185 return alltags
186 186
187 187 for head in reversed(heads): # oldest to newest
188 188 assert head in repo.changelog.nodemap, \
189 189 "tag cache returned bogus head %s" % short(head)
190 190 fnodes = _filterfnodes(tagfnode, reversed(heads))
191 191 alltags = _tagsfromfnodes(ui, repo, fnodes)
192 192
193 193 # and update the cache (if necessary)
194 194 if shouldwrite:
195 195 _writetagcache(ui, repo, valid, alltags)
196 196 return alltags
197 197
198 198 def _filterfnodes(tagfnode, nodes):
199 199 """return a list of unique fnodes
200 200
201 201 The order of this list matches the order of "nodes". Preserving this order
202 202 is important as reading tags in a different order provides different
203 203 results."""
204 204 seen = set() # set of fnode
205 205 fnodes = []
206 206 for no in nodes: # oldest to newest
207 207 fnode = tagfnode.get(no)
208 208 if fnode and fnode not in seen:
209 209 seen.add(fnode)
210 210 fnodes.append(fnode)
211 211 return fnodes
212 212
213 213 def _tagsfromfnodes(ui, repo, fnodes):
214 214 """return a tagsmap from a list of file-node
215 215
216 216 tagsmap: tag name to (node, hist) 2-tuples.
217 217
218 218 The order of the list matters."""
219 219 alltags = {}
220 220 fctx = None
221 221 for fnode in fnodes:
222 222 if fctx is None:
223 223 fctx = repo.filectx('.hgtags', fileid=fnode)
224 224 else:
225 225 fctx = fctx.filectx(fnode)
226 226 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
227 227 _updatetags(filetags, alltags)
228 228 return alltags
229 229
230 230 def readlocaltags(ui, repo, alltags, tagtypes):
231 231 '''Read local tags in repo. Update alltags and tagtypes.'''
232 232 try:
233 233 data = repo.vfs.read("localtags")
234 234 except IOError as inst:
235 235 if inst.errno != errno.ENOENT:
236 236 raise
237 237 return
238 238
239 239 # localtags is in the local encoding; re-encode to UTF-8 on
240 240 # input for consistency with the rest of this module.
241 241 filetags = _readtags(
242 242 ui, repo, data.splitlines(), "localtags",
243 243 recode=encoding.fromlocal)
244 244
245 245 # remove tags pointing to invalid nodes
246 246 cl = repo.changelog
247 247 for t in list(filetags):
248 248 try:
249 249 cl.rev(filetags[t][0])
250 250 except (LookupError, ValueError):
251 251 del filetags[t]
252 252
253 253 _updatetags(filetags, alltags, 'local', tagtypes)
254 254
255 255 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
256 256 '''Read tag definitions from a file (or any source of lines).
257 257
258 258 This function returns two sortdicts with similar information:
259 259
260 260 - the first dict, bintaghist, contains the tag information as expected by
261 261 the _readtags function, i.e. a mapping from tag name to (node, hist):
262 262 - node is the node id from the last line read for that name,
263 263 - hist is the list of node ids previously associated with it (in file
264 264 order). All node ids are binary, not hex.
265 265
266 266 - the second dict, hextaglines, is a mapping from tag name to a list of
267 267 [hexnode, line number] pairs, ordered from the oldest to the newest node.
268 268
269 269 When calcnodelines is False the hextaglines dict is not calculated (an
270 270 empty dict is returned). This is done to improve this function's
271 271 performance in cases where the line numbers are not needed.
272 272 '''
273 273
274 274 bintaghist = util.sortdict()
275 275 hextaglines = util.sortdict()
276 276 count = 0
277 277
278 278 def dbg(msg):
279 279 ui.debug("%s, line %s: %s\n" % (fn, count, msg))
280 280
281 281 for nline, line in enumerate(lines):
282 282 count += 1
283 283 if not line:
284 284 continue
285 285 try:
286 286 (nodehex, name) = line.split(" ", 1)
287 287 except ValueError:
288 288 dbg("cannot parse entry")
289 289 continue
290 290 name = name.strip()
291 291 if recode:
292 292 name = recode(name)
293 293 try:
294 294 nodebin = bin(nodehex)
295 295 except TypeError:
296 296 dbg("node '%s' is not well formed" % nodehex)
297 297 continue
298 298
299 299 # update filetags
300 300 if calcnodelines:
301 301 # map tag name to a list of line numbers
302 302 if name not in hextaglines:
303 303 hextaglines[name] = []
304 304 hextaglines[name].append([nodehex, nline])
305 305 continue
306 306 # map tag name to (node, hist)
307 307 if name not in bintaghist:
308 308 bintaghist[name] = []
309 309 bintaghist[name].append(nodebin)
310 310 return bintaghist, hextaglines
311 311
312 312 def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
313 313 '''Read tag definitions from a file (or any source of lines).
314 314
315 315 Returns a mapping from tag name to (node, hist).
316 316
317 317 "node" is the node id from the last line read for that name. "hist"
318 318 is the list of node ids previously associated with it (in file order).
319 319 All node ids are binary, not hex.
320 320 '''
321 321 filetags, nodelines = _readtaghist(ui, repo, lines, fn, recode=recode,
322 322 calcnodelines=calcnodelines)
323 323 # util.sortdict().__setitem__ is much slower at replacing than inserting
324 324 # new entries. The difference can matter if there are thousands of tags.
325 325 # Create a new sortdict to avoid the performance penalty.
326 326 newtags = util.sortdict()
327 327 for tag, taghist in filetags.items():
328 328 newtags[tag] = (taghist[-1], taghist[:-1])
329 329 return newtags
330 330
331 331 def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
332 332 """Incorporate the tag info read from one file into dictionnaries
333 333
334 334 The first one, 'alltags', is a "tagsmap" (see 'findglobaltags' for details).
335 335
336 336 The second one, 'tagtypes', is optional and will be updated to track the
337 337 "tagtype" of entries in the tagmaps. When set, the 'tagtype' argument also
338 338 needs to be set."""
339 339 if tagtype is None:
340 340 assert tagtypes is None
341 341
342 342 for name, nodehist in filetags.iteritems():
343 343 if name not in alltags:
344 344 alltags[name] = nodehist
345 345 if tagtype is not None:
346 346 tagtypes[name] = tagtype
347 347 continue
348 348
349 349 # we prefer alltags[name] if:
350 350 # it supersedes us OR
351 351 # mutual supersedes and it has a higher rank
352 352 # otherwise we win because we're tip-most
353 353 anode, ahist = nodehist
354 354 bnode, bhist = alltags[name]
355 355 if (bnode != anode and anode in bhist and
356 356 (bnode not in ahist or len(bhist) > len(ahist))):
357 357 anode = bnode
358 358 elif tagtype is not None:
359 359 tagtypes[name] = tagtype
360 360 ahist.extend([n for n in bhist if n not in ahist])
361 361 alltags[name] = anode, ahist
362 362
363 363 def _filename(repo):
364 364 """name of a tagcache file for a given repo or repoview"""
365 365 filename = 'tags2'
366 366 if repo.filtername:
367 367 filename = '%s-%s' % (filename, repo.filtername)
368 368 return filename
369 369
370 370 def _readtagcache(ui, repo):
371 371 '''Read the tag cache.
372 372
373 373 Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).
374 374
375 375 If the cache is completely up-to-date, "cachetags" is a dict of the
376 376 form returned by _readtags() and "heads", "fnodes", and "validinfo" are
377 377 None and "shouldwrite" is False.
378 378
379 379 If the cache is not up to date, "cachetags" is None. "heads" is a list
380 380 of all heads currently in the repository, ordered from tip to oldest.
381 381 "validinfo" is a tuple describing cache validation info. This is used
382 382 when writing the tags cache. "fnodes" is a mapping from head to .hgtags
383 383 filenode. "shouldwrite" is True.
384 384
385 385 If the cache is not up to date, the caller is responsible for reading tag
386 386 info from each returned head. (See findglobaltags().)
387 387 '''
388 388 try:
389 389 cachefile = repo.cachevfs(_filename(repo), 'r')
390 390 # force reading the file for static-http
391 391 cachelines = iter(cachefile)
392 392 except IOError:
393 393 cachefile = None
394 394
395 395 cacherev = None
396 396 cachenode = None
397 397 cachehash = None
398 398 if cachefile:
399 399 try:
400 400 validline = next(cachelines)
401 401 validline = validline.split()
402 402 cacherev = int(validline[0])
403 403 cachenode = bin(validline[1])
404 404 if len(validline) > 2:
405 405 cachehash = bin(validline[2])
406 406 except Exception:
407 407 # corruption of the cache, just recompute it.
408 408 pass
409 409
410 410 tipnode = repo.changelog.tip()
411 411 tiprev = len(repo.changelog) - 1
412 412
413 413 # Case 1 (common): tip is the same, so nothing has changed.
414 414 # (Unchanged tip trivially means no changesets have been added.
415 415 # But, thanks to localrepository.destroyed(), it also means none
416 416 # have been destroyed by strip or rollback.)
417 417 if (cacherev == tiprev
418 418 and cachenode == tipnode
419 419 and cachehash == scmutil.filteredhash(repo, tiprev)):
420 420 tags = _readtags(ui, repo, cachelines, cachefile.name)
421 421 cachefile.close()
422 422 return (None, None, None, tags, False)
423 423 if cachefile:
424 424 cachefile.close() # ignore rest of file
425 425
426 426 valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))
427 427
428 428 repoheads = repo.heads()
429 429 # Case 2 (uncommon): empty repo; get out quickly and don't bother
430 430 # writing an empty cache.
431 431 if repoheads == [nullid]:
432 432 return ([], {}, valid, {}, False)
433 433
434 434 # Case 3 (uncommon): cache file missing or empty.
435 435
436 436 # Case 4 (uncommon): tip rev decreased. This should only happen
437 437 # when we're called from localrepository.destroyed(). Refresh the
438 438 # cache so future invocations will not see disappeared heads in the
439 439 # cache.
440 440
441 441 # Case 5 (common): tip has changed, so we've added/replaced heads.
442 442
443 443 # As it happens, the code to handle cases 3, 4, 5 is the same.
444 444
445 445 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
446 446 # exposed".
447 447 if not len(repo.file('.hgtags')):
448 448 # No tags have ever been committed, so we can avoid a
449 449 # potentially expensive search.
450 450 return ([], {}, valid, None, True)
451 451
452 452
453 453 # Now we have to lookup the .hgtags filenode for every new head.
454 454 # This is the most expensive part of finding tags, so performance
455 455 # depends primarily on the size of newheads. Worst case: no cache
456 456 # file, so newheads == repoheads.
457 457 cachefnode = _getfnodes(ui, repo, repoheads)
458 458
459 459 # Caller has to iterate over all heads, but can use the filenodes in
460 460 # cachefnode to get to each .hgtags revision quickly.
461 461 return (repoheads, cachefnode, valid, None, True)
462 462
463 463 def _getfnodes(ui, repo, nodes):
464 464 """return .hgtags fnodes for a list of changeset nodes
465 465
466 466 Return value is a {node: fnode} mapping. There will be no entry for nodes
467 467 without a '.hgtags' file.
468 468 """
469 469 starttime = util.timer()
470 470 fnodescache = hgtagsfnodescache(repo.unfiltered())
471 471 cachefnode = {}
472 472 for node in reversed(nodes):
473 473 fnode = fnodescache.getfnode(node)
474 474 if fnode != nullid:
475 475 cachefnode[node] = fnode
476 476
477 477 fnodescache.write()
478 478
479 479 duration = util.timer() - starttime
480 480 ui.log('tagscache',
481 481 '%d/%d cache hits/lookups in %0.4f '
482 482 'seconds\n',
483 483 fnodescache.hitcount, fnodescache.lookupcount, duration)
484 484 return cachefnode
485 485
486 486 def _writetagcache(ui, repo, valid, cachetags):
487 487 filename = _filename(repo)
488 488 try:
489 489 cachefile = repo.cachevfs(filename, 'w', atomictemp=True)
490 490 except (OSError, IOError):
491 491 return
492 492
493 493 ui.log('tagscache', 'writing .hg/cache/%s with %d tags\n',
494 494 filename, len(cachetags))
495 495
496 496 if valid[2]:
497 497 cachefile.write('%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2])))
498 498 else:
499 499 cachefile.write('%d %s\n' % (valid[0], hex(valid[1])))
500 500
501 501 # Tag names in the cache are in UTF-8 -- which is the whole reason
502 502 # we keep them in UTF-8 throughout this module. If we converted
503 503 # them to local encoding on input, we would lose info writing them to
504 504 # the cache.
505 505 for (name, (node, hist)) in sorted(cachetags.iteritems()):
506 506 for n in hist:
507 507 cachefile.write("%s %s\n" % (hex(n), name))
508 508 cachefile.write("%s %s\n" % (hex(node), name))
509 509
510 510 try:
511 511 cachefile.close()
512 512 except (OSError, IOError):
513 513 pass
514 514
515 515 def tag(repo, names, node, message, local, user, date, editor=False):
516 516 '''tag a revision with one or more symbolic names.
517 517
518 518 names is a list of strings or, when adding a single tag, names may be a
519 519 string.
520 520
521 521 if local is True, the tags are stored in a per-repository file.
522 522 otherwise, they are stored in the .hgtags file, and a new
523 523 changeset is committed with the change.
524 524
525 525 keyword arguments:
526 526
527 527 local: whether to store tags in non-version-controlled file
528 528 (default False)
529 529
530 530 message: commit message to use if committing
531 531
532 532 user: name of user to use if committing
533 533
534 534 date: date tuple to use if committing'''
535 535
536 536 if not local:
537 537 m = matchmod.exact(repo.root, '', ['.hgtags'])
538 538 if any(repo.status(match=m, unknown=True, ignored=True)):
539 539 raise error.Abort(_('working copy of .hgtags is changed'),
540 540 hint=_('please commit .hgtags manually'))
541 541
542 542 with repo.wlock():
543 543 repo.tags() # instantiate the cache
544 544 _tag(repo, names, node, message, local, user, date,
545 545 editor=editor)
546 546
547 547 def _tag(repo, names, node, message, local, user, date, extra=None,
548 548 editor=False):
549 549 if isinstance(names, str):
550 550 names = (names,)
551 551
552 552 branches = repo.branchmap()
553 553 for name in names:
554 554 repo.hook('pretag', throw=True, node=hex(node), tag=name,
555 555 local=local)
556 556 if name in branches:
557 557 repo.ui.warn(_("warning: tag %s conflicts with existing"
558 558 " branch name\n") % name)
559 559
560 560 def writetags(fp, names, munge, prevtags):
561 561 fp.seek(0, 2)
562 562 if prevtags and prevtags[-1] != '\n':
563 563 fp.write('\n')
564 564 for name in names:
565 565 if munge:
566 566 m = munge(name)
567 567 else:
568 568 m = name
569 569
570 570 if (repo._tagscache.tagtypes and
571 571 name in repo._tagscache.tagtypes):
572 572 old = repo.tags().get(name, nullid)
573 573 fp.write('%s %s\n' % (hex(old), m))
574 574 fp.write('%s %s\n' % (hex(node), m))
575 575 fp.close()
576 576
577 577 prevtags = ''
578 578 if local:
579 579 try:
580 580 fp = repo.vfs('localtags', 'r+')
581 581 except IOError:
582 582 fp = repo.vfs('localtags', 'a')
583 583 else:
584 584 prevtags = fp.read()
585 585
586 586 # local tags are stored in the current charset
587 587 writetags(fp, names, None, prevtags)
588 588 for name in names:
589 589 repo.hook('tag', node=hex(node), tag=name, local=local)
590 590 return
591 591
592 592 try:
593 593 fp = repo.wvfs('.hgtags', 'rb+')
594 594 except IOError as e:
595 595 if e.errno != errno.ENOENT:
596 596 raise
597 597 fp = repo.wvfs('.hgtags', 'ab')
598 598 else:
599 599 prevtags = fp.read()
600 600
601 601 # committed tags are stored in UTF-8
602 602 writetags(fp, names, encoding.fromlocal, prevtags)
603 603
604 604 fp.close()
605 605
606 606 repo.invalidatecaches()
607 607
608 608 if '.hgtags' not in repo.dirstate:
609 609 repo[None].add(['.hgtags'])
610 610
611 611 m = matchmod.exact(repo.root, '', ['.hgtags'])
612 612 tagnode = repo.commit(message, user, date, extra=extra, match=m,
613 613 editor=editor)
614 614
615 615 for name in names:
616 616 repo.hook('tag', node=hex(node), tag=name, local=local)
617 617
618 618 return tagnode
619 619
620 620 _fnodescachefile = 'hgtagsfnodes1'
621 621 _fnodesrecsize = 4 + 20 # changeset fragment + filenode
622 622 _fnodesmissingrec = '\xff' * 24
623 623
624 624 class hgtagsfnodescache(object):
625 625 """Persistent cache mapping revisions to .hgtags filenodes.
626 626
627 627 The cache is an array of records. Each item in the array corresponds to
628 628 a changelog revision. Values in the array contain the first 4 bytes of
629 629 the node hash and the 20-byte .hgtags filenode for that revision.
630 630
631 631 The first 4 bytes are present as a form of verification. Repository
632 632 stripping and rewriting may change the node at a numeric revision in the
633 633 changelog. The changeset fragment serves as a verifier to detect
634 634 rewriting. This logic is shared with the rev branch cache (see
635 635 branchmap.py).
636 636
637 637 The instance holds in memory the full cache content but entries are
638 638 only parsed on read.
639 639
640 640 Instances behave like lists. ``c[i]`` works where i is a rev or
641 641 changeset node. Missing indexes are populated automatically on access.
642 642 """
643 643 def __init__(self, repo):
644 644 assert repo.filtername is None
645 645
646 646 self._repo = repo
647 647
648 648 # Only for reporting purposes.
649 649 self.lookupcount = 0
650 650 self.hitcount = 0
651 651
652 652
653 653 try:
654 654 data = repo.cachevfs.read(_fnodescachefile)
655 655 except (OSError, IOError):
656 656 data = ""
657 657 self._raw = bytearray(data)
658 658
659 659 # The end state of self._raw is an array that is of the exact length
660 660 # required to hold a record for every revision in the repository.
661 661 # We truncate or extend the array as necessary. self._dirtyoffset is
662 662 # defined to be the start offset at which we need to write the output
663 663 # file. This offset is also adjusted when new entries are calculated
664 664 # for array members.
665 665 cllen = len(repo.changelog)
666 666 wantedlen = cllen * _fnodesrecsize
667 667 rawlen = len(self._raw)
668 668
669 669 self._dirtyoffset = None
670 670
671 671 if rawlen < wantedlen:
672 672 self._dirtyoffset = rawlen
673 673 self._raw.extend('\xff' * (wantedlen - rawlen))
674 674 elif rawlen > wantedlen:
675 675 # There's no easy way to truncate array instances. This seems
676 676 # slightly less evil than copying a potentially large array slice.
677 677 for i in range(rawlen - wantedlen):
678 678 self._raw.pop()
679 679 self._dirtyoffset = len(self._raw)
680 680
681 681 def getfnode(self, node, computemissing=True):
682 682 """Obtain the filenode of the .hgtags file at a specified revision.
683 683
684 684 If the value is in the cache, the entry will be validated and returned.
685 685 Otherwise, the filenode will be computed and returned unless
686 686 "computemissing" is False, in which case None will be returned without
687 687 any potentially expensive computation being performed.
688 688
689 689 If an .hgtags does not exist at the specified revision, nullid is
690 690 returned.
691 691 """
692 692 ctx = self._repo[node]
693 693 rev = ctx.rev()
694 694
695 695 self.lookupcount += 1
696 696
697 697 offset = rev * _fnodesrecsize
698 698 record = '%s' % self._raw[offset:offset + _fnodesrecsize]
699 699 properprefix = node[0:4]
700 700
701 701 # Validate and return existing entry.
702 702 if record != _fnodesmissingrec:
703 703 fileprefix = record[0:4]
704 704
705 705 if fileprefix == properprefix:
706 706 self.hitcount += 1
707 707 return record[4:]
708 708
709 709 # Fall through.
710 710
711 711 # If we get here, the entry is either missing or invalid.
712 712
713 713 if not computemissing:
714 714 return None
715 715
716 716 # Populate missing entry.
717 717 try:
718 718 fnode = ctx.filenode('.hgtags')
719 719 except error.LookupError:
720 720 # No .hgtags file on this revision.
721 721 fnode = nullid
722 722
723 723 self._writeentry(offset, properprefix, fnode)
724 724 return fnode
725 725
726 726 def setfnode(self, node, fnode):
727 727 """Set the .hgtags filenode for a given changeset."""
728 728 assert len(fnode) == 20
729 729 ctx = self._repo[node]
730 730
731 731 # Do a lookup first to avoid writing if nothing has changed.
732 732 if self.getfnode(ctx.node(), computemissing=False) == fnode:
733 733 return
734 734
735 735 self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)
736 736
737 737 def _writeentry(self, offset, prefix, fnode):
738 738 # Slices on array instances only accept other array.
739 739 entry = bytearray(prefix + fnode)
740 740 self._raw[offset:offset + _fnodesrecsize] = entry
741 741 # self._dirtyoffset could be None.
742 742 self._dirtyoffset = min(self._dirtyoffset or 0, offset or 0)
743 743
744 744 def write(self):
745 745 """Perform all necessary writes to cache file.
746 746
747 747 This may no-op if no writes are needed or if a write lock could
748 748 not be obtained.
749 749 """
750 750 if self._dirtyoffset is None:
751 751 return
752 752
753 753 data = self._raw[self._dirtyoffset:]
754 754 if not data:
755 755 return
756 756
757 757 repo = self._repo
758 758
759 759 try:
760 760 lock = repo.wlock(wait=False)
761 761 except error.LockError:
762 762 repo.ui.log('tagscache', 'not writing .hg/cache/%s because '
763 763 'lock cannot be acquired\n' % (_fnodescachefile))
764 764 return
765 765
766 766 try:
767 767 f = repo.cachevfs.open(_fnodescachefile, 'ab')
768 768 try:
769 769 # if the file has been truncated
770 770 actualoffset = f.tell()
771 771 if actualoffset < self._dirtyoffset:
772 772 self._dirtyoffset = actualoffset
773 773 data = self._raw[self._dirtyoffset:]
774 774 f.seek(self._dirtyoffset)
775 775 f.truncate()
776 776 repo.ui.log('tagscache',
777 777 'writing %d bytes to cache/%s\n' % (
778 778 len(data), _fnodescachefile))
779 779 f.write(data)
780 780 self._dirtyoffset = None
781 781 finally:
782 782 f.close()
783 783 except (IOError, OSError) as inst:
784 784 repo.ui.log('tagscache',
785 785 "couldn't write cache/%s: %s\n" % (
786 _fnodescachefile, inst))
786 _fnodescachefile, util.forcebytestr(inst)))
787 787 finally:
788 788 lock.release()