##// END OF EJS Templates
tags: make argument 'tagtype' optional in '_updatetags'...
Pierre-Yves David -
r31708:d0e7c70f default
parent child Browse files
Show More
@@ -1,676 +1,683 b''
1 1 # tags.py - read tag info from local repository
2 2 #
3 3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2009 Greg Ward <greg@gerg.ca>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 # Currently this module only deals with reading and caching tags.
10 10 # Eventually, it could take care of updating (adding/removing/moving)
11 11 # tags too.
12 12
13 13 from __future__ import absolute_import
14 14
15 15 import errno
16 16
17 17 from .node import (
18 18 bin,
19 19 hex,
20 20 nullid,
21 21 short,
22 22 )
23 23 from .i18n import _
24 24 from . import (
25 25 encoding,
26 26 error,
27 27 match as matchmod,
28 28 scmutil,
29 29 util,
30 30 )
31 31
32 32 # Tags computation can be expensive and caches exist to make it fast in
33 33 # the common case.
34 34 #
35 35 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
36 36 # each revision in the repository. The file is effectively an array of
37 37 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
38 38 # details.
39 39 #
# The .hgtags filenode cache grows in proportion to the length of the
# changelog. The file is truncated when the changelog is stripped.
42 42 #
43 43 # The purpose of the filenode cache is to avoid the most expensive part
44 44 # of finding global tags, which is looking up the .hgtags filenode in the
45 45 # manifest for each head. This can take dozens or over 100ms for
46 46 # repositories with very large manifests. Multiplied by dozens or even
47 47 # hundreds of heads and there is a significant performance concern.
48 48 #
49 49 # There also exist a separate cache file for each repository filter.
50 50 # These "tags-*" files store information about the history of tags.
51 51 #
52 52 # The tags cache files consists of a cache validation line followed by
53 53 # a history of tags.
54 54 #
55 55 # The cache validation line has the format:
56 56 #
57 57 # <tiprev> <tipnode> [<filteredhash>]
58 58 #
59 59 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
60 60 # node for that changeset. These redundantly identify the repository
61 61 # tip from the time the cache was written. In addition, <filteredhash>,
62 62 # if present, is a 40 character hex hash of the contents of the filtered
63 63 # revisions for this filter. If the set of filtered revs changes, the
64 64 # hash will change and invalidate the cache.
65 65 #
66 66 # The history part of the tags cache consists of lines of the form:
67 67 #
68 68 # <node> <tag>
69 69 #
70 70 # (This format is identical to that of .hgtags files.)
71 71 #
72 72 # <tag> is the tag name and <node> is the 40 character hex changeset
73 73 # the tag is associated with.
74 74 #
75 75 # Tags are written sorted by tag name.
76 76 #
77 77 # Tags associated with multiple changesets have an entry for each changeset.
78 78 # The most recent changeset (in terms of revlog ordering for the head
79 79 # setting it) for each tag is last.
80 80
def findglobaltags(ui, repo):
    '''Find global tags in a repo: return (alltags, tagtypes)

    "alltags" maps tag name to (node, hist) 2-tuples.

    "tagtypes" maps tag name to tag type. Global tags always have the
    "global" tag type.

    The tags cache is read and updated as a side-effect of calling.
    '''
    alltags = {}
    tagtypes = {}

    heads, tagfnode, valid, cachetags, shouldwrite = _readtagcache(ui, repo)
    if cachetags is not None:
        # An up-to-date cache means nothing to recompute and nothing to
        # write back.
        assert not shouldwrite
        # XXX is this really 100% correct? are there oddball special
        # cases where a global tag should outrank a local tag but won't,
        # because cachetags does not contain rank info?
        _updatetags(cachetags, alltags, 'global', tagtypes)
        return alltags, tagtypes

    # Walk heads oldest-to-newest so the tip-most definition of each tag
    # is applied last.
    knownfnodes = set()
    hgtagsfctx = None
    for head in reversed(heads):
        assert head in repo.changelog.nodemap, \
               "tag cache returned bogus head %s" % short(head)

        fnode = tagfnode.get(head)
        # Multiple heads may share one .hgtags filenode; parse each
        # distinct filenode only once.
        if fnode and fnode not in knownfnodes:
            knownfnodes.add(fnode)
            if not hgtagsfctx:
                hgtagsfctx = repo.filectx('.hgtags', fileid=fnode)
            else:
                # Reusing the previous filectx avoids a fresh file lookup.
                hgtagsfctx = hgtagsfctx.filectx(fnode)

            filetags = _readtags(ui, repo, hgtagsfctx.data().splitlines(),
                                 hgtagsfctx)
            _updatetags(filetags, alltags, 'global', tagtypes)

    # Persist what we learned if the cache was stale.
    if shouldwrite:
        _writetagcache(ui, repo, valid, alltags)
    return alltags, tagtypes
124 124
def readlocaltags(ui, repo, alltags, tagtypes):
    '''Read local tags in repo. Update alltags and tagtypes.'''
    try:
        data = repo.vfs.read("localtags")
    except IOError as inst:
        # A missing localtags file just means there are no local tags.
        if inst.errno != errno.ENOENT:
            raise
        return

    # localtags is in the local encoding; re-encode to UTF-8 on
    # input for consistency with the rest of this module.
    filetags = _readtags(ui, repo, data.splitlines(), "localtags",
                         recode=encoding.fromlocal)

    # Drop tags whose target changeset is unknown (e.g. it was stripped).
    cl = repo.changelog
    for name in list(filetags):
        try:
            cl.rev(filetags[name][0])
        except (LookupError, ValueError):
            del filetags[name]

    _updatetags(filetags, alltags, 'local', tagtypes)
149 149
def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
    '''Read tag definitions from a file (or any source of lines).

    This function returns two sortdicts with similar information:

    - the first dict, bintaghist, contains the tag information as expected by
      the _readtags function, i.e. a mapping from tag name to (node, hist):
        - node is the node id from the last line read for that name,
        - hist is the list of node ids previously associated with it (in file
          order). All node ids are binary, not hex.

    - the second dict, hextaglines, is a mapping from tag name to a list of
      [hexnode, line number] pairs, ordered from the oldest to the newest node.

    When calcnodelines is False the hextaglines dict is not calculated (an
    empty dict is returned). This is done to improve this function's
    performance in cases where the line numbers are not needed.
    '''

    bintaghist = util.sortdict()
    hextaglines = util.sortdict()
    count = 0

    def dbg(msg):
        # 'count' is 1-based and counts every input line, blanks included.
        ui.debug("%s, line %s: %s\n" % (fn, count, msg))

    for nline, rawline in enumerate(lines):
        count += 1
        # Skip empty lines silently; anything else must parse.
        if not rawline:
            continue
        try:
            hexnode, tagname = rawline.split(" ", 1)
        except ValueError:
            dbg("cannot parse entry")
            continue
        tagname = tagname.strip()
        if recode:
            tagname = recode(tagname)
        try:
            binnode = bin(hexnode)
        except TypeError:
            dbg("node '%s' is not well formed" % hexnode)
            continue

        if calcnodelines:
            # Caller only wants the [hexnode, line number] history.
            if tagname not in hextaglines:
                hextaglines[tagname] = []
            hextaglines[tagname].append([hexnode, nline])
            continue
        # Otherwise accumulate the binary node history for this tag.
        if tagname not in bintaghist:
            bintaghist[tagname] = []
        bintaghist[tagname].append(binnode)
    return bintaghist, hextaglines
206 206
def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
    '''Read tag definitions from a file (or any source of lines).

    Returns a mapping from tag name to (node, hist).

    "node" is the node id from the last line read for that name. "hist"
    is the list of node ids previously associated with it (in file order).
    All node ids are binary, not hex.
    '''
    hist, unused = _readtaghist(ui, repo, lines, fn, recode=recode,
                                calcnodelines=calcnodelines)
    # util.sortdict().__setitem__ is much slower at replacing than at
    # inserting new entries, and repositories can carry thousands of tags,
    # so build a brand new sortdict instead of rewriting 'hist' in place.
    result = util.sortdict()
    for tagname, taghist in hist.items():
        result[tagname] = (taghist[-1], taghist[:-1])
    return result
225 225
226 def _updatetags(filetags, alltags, tagtype, tagtypes):
227 '''Incorporate the tag info read from one file into the two
228 dictionaries, alltags and tagtypes, that contain all tag
229 info (global across all heads plus local).'''
226 def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
227 """Incorporate the tag info read from one file into dictionnaries
228
229 The first one, 'alltags', is a "tagmaps" (see 'findglobaltags' for details).
230
231 The second one, 'tagtypes', is optional and will be updated to track the
232 "tagtype" of entries in the tagmaps. When set, the 'tagtype' argument also
233 needs to be set."""
234 if tagtype is None:
235 assert tagtypes is None
230 236
231 237 for name, nodehist in filetags.iteritems():
232 238 if name not in alltags:
233 239 alltags[name] = nodehist
240 if tagtype is not None:
234 241 tagtypes[name] = tagtype
235 242 continue
236 243
237 244 # we prefer alltags[name] if:
238 245 # it supersedes us OR
239 246 # mutual supersedes and it has a higher rank
240 247 # otherwise we win because we're tip-most
241 248 anode, ahist = nodehist
242 249 bnode, bhist = alltags[name]
243 250 if (bnode != anode and anode in bhist and
244 251 (bnode not in ahist or len(bhist) > len(ahist))):
245 252 anode = bnode
246 else:
253 elif tagtype is not None:
247 254 tagtypes[name] = tagtype
248 255 ahist.extend([n for n in bhist if n not in ahist])
249 256 alltags[name] = anode, ahist
250 257
251 258 def _filename(repo):
252 259 """name of a tagcache file for a given repo or repoview"""
253 260 filename = 'cache/tags2'
254 261 if repo.filtername:
255 262 filename = '%s-%s' % (filename, repo.filtername)
256 263 return filename
257 264
def _readtagcache(ui, repo):
    '''Read the tag cache.

    Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).

    If the cache is completely up-to-date, "cachetags" is a dict of the
    form returned by _readtags() and "heads", "fnodes", and "validinfo" are
    None and "shouldwrite" is False.

    If the cache is not up to date, "cachetags" is None. "heads" is a list
    of all heads currently in the repository, ordered from tip to oldest.
    "validinfo" is a tuple describing cache validation info. This is used
    when writing the tags cache. "fnodes" is a mapping from head to .hgtags
    filenode. "shouldwrite" is True.

    If the cache is not up to date, the caller is responsible for reading tag
    info from each returned head. (See findglobaltags().)
    '''
    try:
        cachefile = repo.vfs(_filename(repo), 'r')
        # force reading the file for static-http
        cachelines = iter(cachefile)
    except IOError:
        cachefile = None

    # Parse the validation line: "<tiprev> <tipnode> [<filteredhash>]".
    cacherev = None
    cachenode = None
    cachehash = None
    if cachefile:
        try:
            validline = next(cachelines)
            validline = validline.split()
            cacherev = int(validline[0])
            cachenode = bin(validline[1])
            if len(validline) > 2:
                cachehash = bin(validline[2])
        except Exception:
            # corruption of the cache, just recompute it.
            pass

    tipnode = repo.changelog.tip()
    tiprev = len(repo.changelog) - 1

    # Case 1 (common): tip is the same, so nothing has changed.
    # (Unchanged tip trivially means no changesets have been added.
    # But, thanks to localrepository.destroyed(), it also means none
    # have been destroyed by strip or rollback.)
    if (cacherev == tiprev
            and cachenode == tipnode
            and cachehash == scmutil.filteredhash(repo, tiprev)):
        # Cache hit: the remaining cache lines are the tag entries.
        tags = _readtags(ui, repo, cachelines, cachefile.name)
        cachefile.close()
        return (None, None, None, tags, False)
    if cachefile:
        cachefile.close() # ignore rest of file

    # Validation info to be written back once the caller recomputes tags.
    valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))

    repoheads = repo.heads()
    # Case 2 (uncommon): empty repo; get out quickly and don't bother
    # writing an empty cache.
    if repoheads == [nullid]:
        return ([], {}, valid, {}, False)

    # Case 3 (uncommon): cache file missing or empty.

    # Case 4 (uncommon): tip rev decreased. This should only happen
    # when we're called from localrepository.destroyed(). Refresh the
    # cache so future invocations will not see disappeared heads in the
    # cache.

    # Case 5 (common): tip has changed, so we've added/replaced heads.

    # As it happens, the code to handle cases 3, 4, 5 is the same.

    # N.B. in case 4 (nodes destroyed), "new head" really means "newly
    # exposed".
    if not len(repo.file('.hgtags')):
        # No tags have ever been committed, so we can avoid a
        # potentially expensive search.
        return ([], {}, valid, None, True)


    # Now we have to lookup the .hgtags filenode for every new head.
    # This is the most expensive part of finding tags, so performance
    # depends primarily on the size of newheads. Worst case: no cache
    # file, so newheads == repoheads.
    cachefnode = _getfnodes(ui, repo, repoheads)

    # Caller has to iterate over all heads, but can use the filenodes in
    # cachefnode to get to each .hgtags revision quickly.
    return (repoheads, cachefnode, valid, None, True)
350 357
def _getfnodes(ui, repo, nodes):
    """return .hgtags fnodes for a list of changeset nodes

    Return value is a {node: fnode} mapping. There will be no entry for nodes
    without a '.hgtags' file.
    """
    begin = util.timer()
    cache = hgtagsfnodescache(repo.unfiltered())
    fnodes = {}
    # Iterate oldest-to-newest, mirroring how findglobaltags walks heads.
    for node in reversed(nodes):
        fnode = cache.getfnode(node)
        # nullid means the revision carries no .hgtags file: omit it.
        if fnode != nullid:
            fnodes[node] = fnode

    cache.write()

    elapsed = util.timer() - begin
    ui.log('tagscache',
           '%d/%d cache hits/lookups in %0.4f '
           'seconds\n',
           cache.hitcount, cache.lookupcount, elapsed)
    return fnodes
373 380
def _writetagcache(ui, repo, valid, cachetags):
    """Write the tags cache file for this repo/repoview.

    'valid' is the (tiprev, tipnode, filteredhash) validation tuple and
    'cachetags' maps tag name to (node, hist). Write failures are silently
    ignored; the cache is purely an optimization.
    """
    filename = _filename(repo)
    try:
        fp = repo.vfs(filename, 'w', atomictemp=True)
    except (OSError, IOError):
        return

    ui.log('tagscache', 'writing .hg/%s with %d tags\n',
           filename, len(cachetags))

    # First line validates the cache; the filtered hash part is optional.
    if valid[2]:
        fp.write('%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2])))
    else:
        fp.write('%d %s\n' % (valid[0], hex(valid[1])))

    # Tag names in the cache are in UTF-8 -- which is the whole reason
    # we keep them in UTF-8 throughout this module. If we converted
    # them local encoding on input, we would lose info writing them to
    # the cache.
    for name, (node, hist) in sorted(cachetags.iteritems()):
        # Older nodes first, most recent definition last.
        for n in hist:
            fp.write("%s %s\n" % (hex(n), name))
        fp.write("%s %s\n" % (hex(node), name))

    try:
        fp.close()
    except (OSError, IOError):
        pass
402 409
def tag(repo, names, node, message, local, user, date, editor=False):
    '''tag a revision with one or more symbolic names.

    names is a list of strings or, when adding a single tag, names may be a
    string.

    if local is True, the tags are stored in a per-repository file.
    otherwise, they are stored in the .hgtags file, and a new
    changeset is committed with the change.

    keyword arguments:

    local: whether to store tags in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing'''

    if not local:
        # Refuse to commit a new .hgtags on top of uncommitted edits.
        hgtagsmatcher = matchmod.exact(repo.root, '', ['.hgtags'])
        status = repo.status(match=hgtagsmatcher, unknown=True, ignored=True)
        if any(status):
            raise error.Abort(_('working copy of .hgtags is changed'),
                              hint=_('please commit .hgtags manually'))

    repo.tags() # instantiate the cache
    _tag(repo.unfiltered(), names, node, message, local, user, date,
         editor=editor)
433 440
def _tag(repo, names, node, message, local, user, date, extra=None,
         editor=False):
    # Worker for tag(): writes the tag(s) either to the non-versioned
    # 'localtags' file or to '.hgtags' followed by a commit.
    if isinstance(names, str):
        names = (names,)

    branches = repo.branchmap()
    for name in names:
        # Give 'pretag' hooks a chance to veto before anything is written.
        repo.hook('pretag', throw=True, node=hex(node), tag=name,
                  local=local)
        if name in branches:
            repo.ui.warn(_("warning: tag %s conflicts with existing"
                           " branch name\n") % name)

    def writetags(fp, names, munge, prevtags):
        # Append '<hexnode> <name>' lines to fp, making sure the existing
        # content ends with a newline first.
        fp.seek(0, 2)
        if prevtags and prevtags[-1] != '\n':
            fp.write('\n')
        for name in names:
            if munge:
                m = munge(name)
            else:
                m = name

            if (repo._tagscache.tagtypes and
                name in repo._tagscache.tagtypes):
                # The tag already exists (any type): record its old node
                # first so the tag's history is preserved.
                old = repo.tags().get(name, nullid)
                fp.write('%s %s\n' % (hex(old), m))
            fp.write('%s %s\n' % (hex(node), m))
        fp.close()

    prevtags = ''
    if local:
        try:
            fp = repo.vfs('localtags', 'r+')
        except IOError:
            # No localtags file yet; create it.
            fp = repo.vfs('localtags', 'a')
        else:
            prevtags = fp.read()

        # local tags are stored in the current charset
        writetags(fp, names, None, prevtags)
        for name in names:
            repo.hook('tag', node=hex(node), tag=name, local=local)
        return

    try:
        fp = repo.wvfs('.hgtags', 'rb+')
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        fp = repo.wvfs('.hgtags', 'ab')
    else:
        prevtags = fp.read()

    # committed tags are stored in UTF-8
    writetags(fp, names, encoding.fromlocal, prevtags)

    fp.close()

    repo.invalidatecaches()

    if '.hgtags' not in repo.dirstate:
        repo[None].add(['.hgtags'])

    m = matchmod.exact(repo.root, '', ['.hgtags'])
    tagnode = repo.commit(message, user, date, extra=extra, match=m,
                          editor=editor)

    # Fire 'tag' hooks only after the commit succeeded.
    for name in names:
        repo.hook('tag', node=hex(node), tag=name, local=local)

    return tagnode
506 513
# On-disk cache file mapping revisions to .hgtags filenodes (see
# hgtagsfnodescache below).
_fnodescachefile = 'cache/hgtagsfnodes1'
# Each fixed-size record: 4-byte changeset-node prefix + 20-byte filenode.
_fnodesrecsize = 4 + 20 # changeset fragment + filenode
# Sentinel record value meaning "entry not yet computed".
_fnodesmissingrec = '\xff' * 24
510 517
class hgtagsfnodescache(object):
    """Persistent cache mapping revisions to .hgtags filenodes.

    The cache is an array of records. Each item in the array corresponds to
    a changelog revision. Values in the array contain the first 4 bytes of
    the node hash and the 20 bytes .hgtags filenode for that revision.

    The first 4 bytes are present as a form of verification. Repository
    stripping and rewriting may change the node at a numeric revision in the
    changelog. The changeset fragment serves as a verifier to detect
    rewriting. This logic is shared with the rev branch cache (see
    branchmap.py).

    The instance holds in memory the full cache content but entries are
    only parsed on read.

    Instances behave like lists. ``c[i]`` works where i is a rev or
    changeset node. Missing indexes are populated automatically on access.
    """
    def __init__(self, repo):
        # The cache is indexed by raw revision number, so it must be built
        # against the unfiltered repository.
        assert repo.filtername is None

        self._repo = repo

        # Only for reporting purposes.
        self.lookupcount = 0
        self.hitcount = 0


        try:
            data = repo.vfs.read(_fnodescachefile)
        except (OSError, IOError):
            # Missing/unreadable cache file: start from an empty cache.
            data = ""
        self._raw = bytearray(data)

        # The end state of self._raw is an array that is of the exact length
        # required to hold a record for every revision in the repository.
        # We truncate or extend the array as necessary. self._dirtyoffset is
        # defined to be the start offset at which we need to write the output
        # file. This offset is also adjusted when new entries are calculated
        # for array members.
        cllen = len(repo.changelog)
        wantedlen = cllen * _fnodesrecsize
        rawlen = len(self._raw)

        self._dirtyoffset = None

        if rawlen < wantedlen:
            # Pad with the "missing" sentinel; everything new is dirty.
            self._dirtyoffset = rawlen
            self._raw.extend('\xff' * (wantedlen - rawlen))
        elif rawlen > wantedlen:
            # There's no easy way to truncate array instances. This seems
            # slightly less evil than copying a potentially large array slice.
            for i in range(rawlen - wantedlen):
                self._raw.pop()
            self._dirtyoffset = len(self._raw)

    def getfnode(self, node, computemissing=True):
        """Obtain the filenode of the .hgtags file at a specified revision.

        If the value is in the cache, the entry will be validated and returned.
        Otherwise, the filenode will be computed and returned unless
        "computemissing" is False, in which case None will be returned without
        any potentially expensive computation being performed.

        If an .hgtags does not exist at the specified revision, nullid is
        returned.
        """
        ctx = self._repo[node]
        rev = ctx.rev()

        self.lookupcount += 1

        offset = rev * _fnodesrecsize
        # '%s' % bytearray gives a str copy of the record slice.
        record = '%s' % self._raw[offset:offset + _fnodesrecsize]
        properprefix = node[0:4]

        # Validate and return existing entry.
        if record != _fnodesmissingrec:
            fileprefix = record[0:4]

            # The stored 4-byte node prefix must match the requested node;
            # a mismatch means the revision was rewritten (e.g. strip).
            if fileprefix == properprefix:
                self.hitcount += 1
                return record[4:]

            # Fall through.

        # If we get here, the entry is either missing or invalid.

        if not computemissing:
            return None

        # Populate missing entry.
        try:
            fnode = ctx.filenode('.hgtags')
        except error.LookupError:
            # No .hgtags file on this revision.
            fnode = nullid

        self._writeentry(offset, properprefix, fnode)
        return fnode

    def setfnode(self, node, fnode):
        """Set the .hgtags filenode for a given changeset."""
        assert len(fnode) == 20
        ctx = self._repo[node]

        # Do a lookup first to avoid writing if nothing has changed.
        if self.getfnode(ctx.node(), computemissing=False) == fnode:
            return

        self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)

    def _writeentry(self, offset, prefix, fnode):
        # Store one record in memory and mark the array dirty from 'offset'.
        # Slices on array instances only accept other array.
        entry = bytearray(prefix + fnode)
        self._raw[offset:offset + _fnodesrecsize] = entry
        # self._dirtyoffset could be None; Python 2 allows min(None, int)
        # (None compares less than any int), and 'or 0' normalizes the
        # possible None result back to offset 0.
        self._dirtyoffset = min(self._dirtyoffset, offset) or 0

    def write(self):
        """Perform all necessary writes to cache file.

        This may no-op if no writes are needed or if a write lock could
        not be obtained.
        """
        if self._dirtyoffset is None:
            return

        data = self._raw[self._dirtyoffset:]
        if not data:
            return

        repo = self._repo

        try:
            # Non-blocking: the cache is best-effort, never worth waiting for.
            lock = repo.wlock(wait=False)
        except error.LockError:
            repo.ui.log('tagscache',
                        'not writing .hg/%s because lock cannot be acquired\n' %
                        (_fnodescachefile))
            return

        try:
            f = repo.vfs.open(_fnodescachefile, 'ab')
            try:
                # if the file has been truncated
                actualoffset = f.tell()
                if actualoffset < self._dirtyoffset:
                    self._dirtyoffset = actualoffset
                    data = self._raw[self._dirtyoffset:]
                f.seek(self._dirtyoffset)
                f.truncate()
                repo.ui.log('tagscache',
                            'writing %d bytes to %s\n' % (
                            len(data), _fnodescachefile))
                f.write(data)
                self._dirtyoffset = None
            finally:
                f.close()
        except (IOError, OSError) as inst:
            # Best-effort cache: log and move on if the write fails.
            repo.ui.log('tagscache',
                        "couldn't write %s: %s\n" % (
                        _fnodescachefile, inst))
        finally:
            lock.release()
General Comments 0
You need to be logged in to leave comments. Login now