##// END OF EJS Templates
tags: extract fnode retrieval into its own function...
Pierre-Yves David -
r31705:5eb4d206 default
parent child Browse files
Show More
@@ -1,671 +1,680 b''
1 1 # tags.py - read tag info from local repository
2 2 #
3 3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2009 Greg Ward <greg@gerg.ca>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 # Currently this module only deals with reading and caching tags.
10 10 # Eventually, it could take care of updating (adding/removing/moving)
11 11 # tags too.
12 12
13 13 from __future__ import absolute_import
14 14
15 15 import errno
16 16
17 17 from .node import (
18 18 bin,
19 19 hex,
20 20 nullid,
21 21 short,
22 22 )
23 23 from .i18n import _
24 24 from . import (
25 25 encoding,
26 26 error,
27 27 match as matchmod,
28 28 scmutil,
29 29 util,
30 30 )
31 31
32 32 # Tags computation can be expensive and caches exist to make it fast in
33 33 # the common case.
34 34 #
35 35 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
36 36 # each revision in the repository. The file is effectively an array of
37 37 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
38 38 # details.
39 39 #
40 40 # The .hgtags filenode cache grows in proportion to the length of the
41 41 # changelog. The file is truncated when the changelog is stripped.
42 42 #
43 43 # The purpose of the filenode cache is to avoid the most expensive part
44 44 # of finding global tags, which is looking up the .hgtags filenode in the
45 45 # manifest for each head. This can take dozens or over 100ms for
46 46 # repositories with very large manifests. Multiplied by dozens or even
47 47 # hundreds of heads and there is a significant performance concern.
48 48 #
49 49 # There also exist a separate cache file for each repository filter.
50 50 # These "tags-*" files store information about the history of tags.
51 51 #
52 52 # The tags cache files consists of a cache validation line followed by
53 53 # a history of tags.
54 54 #
55 55 # The cache validation line has the format:
56 56 #
57 57 # <tiprev> <tipnode> [<filteredhash>]
58 58 #
59 59 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
60 60 # node for that changeset. These redundantly identify the repository
61 61 # tip from the time the cache was written. In addition, <filteredhash>,
62 62 # if present, is a 40 character hex hash of the contents of the filtered
63 63 # revisions for this filter. If the set of filtered revs changes, the
64 64 # hash will change and invalidate the cache.
65 65 #
66 66 # The history part of the tags cache consists of lines of the form:
67 67 #
68 68 # <node> <tag>
69 69 #
70 70 # (This format is identical to that of .hgtags files.)
71 71 #
72 72 # <tag> is the tag name and <node> is the 40 character hex changeset
73 73 # the tag is associated with.
74 74 #
75 75 # Tags are written sorted by tag name.
76 76 #
77 77 # Tags associated with multiple changesets have an entry for each changeset.
78 78 # The most recent changeset (in terms of revlog ordering for the head
79 79 # setting it) for each tag is last.
80 80
def findglobaltags(ui, repo, alltags, tagtypes):
    '''Find global tags in a repo.

    "alltags" maps tag name to (node, hist) 2-tuples.

    "tagtypes" maps tag name to tag type. Global tags always have the
    "global" tag type.

    The "alltags" and "tagtypes" dicts are updated in place. Empty dicts
    should be passed in.

    The tags cache is read and updated as a side-effect of calling.
    '''
    # We rely on alltags holding only global tags when it is handed to
    # _writetagcache() below, hence this precondition.
    assert len(alltags) == len(tagtypes) == 0, \
           "findglobaltags() should be called first"

    (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
    if cachetags is not None:
        assert not shouldwrite
        # XXX is this really 100% correct? are there oddball special
        # cases where a global tag should outrank a local tag but won't,
        # because cachetags does not contain rank info?
        _updatetags(cachetags, 'global', alltags, tagtypes)
        return

    seenfnodes = set()
    hgtagsfctx = None
    # Walk heads oldest to newest so the tip-most definition of a tag wins.
    for head in reversed(heads):
        assert head in repo.changelog.nodemap, \
               "tag cache returned bogus head %s" % short(head)

        fnode = tagfnode.get(head)
        if fnode and fnode not in seenfnodes:
            seenfnodes.add(fnode)
            # Reuse the previous filectx when possible; it is cheaper than
            # a fresh lookup through the manifest.
            if hgtagsfctx:
                hgtagsfctx = hgtagsfctx.filectx(fnode)
            else:
                hgtagsfctx = repo.filectx('.hgtags', fileid=fnode)

            filetags = _readtags(ui, repo, hgtagsfctx.data().splitlines(),
                                 hgtagsfctx)
            _updatetags(filetags, 'global', alltags, tagtypes)

    # Persist what we learned, if the cache was stale.
    if shouldwrite:
        _writetagcache(ui, repo, valid, alltags)
128 128
def readlocaltags(ui, repo, alltags, tagtypes):
    '''Read local tags in repo. Update alltags and tagtypes.'''
    try:
        data = repo.vfs.read("localtags")
    except IOError as inst:
        # A missing localtags file simply means there are no local tags.
        if inst.errno != errno.ENOENT:
            raise
        return

    # localtags is in the local encoding; re-encode to UTF-8 on
    # input for consistency with the rest of this module.
    filetags = _readtags(
        ui, repo, data.splitlines(), "localtags",
        recode=encoding.fromlocal)

    # Drop tags whose target changeset no longer exists (e.g. was stripped).
    cl = repo.changelog
    for name in filetags.keys():
        try:
            cl.rev(filetags[name][0])
        except (LookupError, ValueError):
            del filetags[name]

    _updatetags(filetags, "local", alltags, tagtypes)
153 153
def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
    '''Read tag definitions from a file (or any source of lines).

    This function returns two sortdicts with similar information:

    - the first dict, bintaghist, contains the tag information as expected by
      the _readtags function, i.e. a mapping from tag name to (node, hist):
        - node is the node id from the last line read for that name,
        - hist is the list of node ids previously associated with it (in file
          order). All node ids are binary, not hex.

    - the second dict, hextaglines, is a mapping from tag name to a list of
      [hexnode, line number] pairs, ordered from the oldest to the newest node.

    When calcnodelines is False the hextaglines dict is not calculated (an
    empty dict is returned). This is done to improve this function's
    performance in cases where the line numbers are not needed.
    '''

    bintaghist = util.sortdict()
    hextaglines = util.sortdict()
    count = 0

    def dbg(msg):
        # One-based line number for human-readable diagnostics.
        ui.debug("%s, line %s: %s\n" % (fn, count, msg))

    for nline, line in enumerate(lines):
        count += 1
        if not line:
            continue
        try:
            (nodehex, name) = line.split(" ", 1)
        except ValueError:
            dbg("cannot parse entry")
            continue
        name = name.strip()
        if recode:
            name = recode(name)
        try:
            nodebin = bin(nodehex)
        except TypeError:
            dbg("node '%s' is not well formed" % nodehex)
            continue

        if calcnodelines:
            # Record [hexnode, line] pairs per tag name; the binary history
            # dict is intentionally skipped in this mode.
            hextaglines.setdefault(name, [])
            hextaglines[name].append([nodehex, nline])
            continue
        # Accumulate the binary node history per tag name.
        bintaghist.setdefault(name, [])
        bintaghist[name].append(nodebin)
    return bintaghist, hextaglines
210 210
def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
    '''Read tag definitions from a file (or any source of lines).

    Returns a mapping from tag name to (node, hist).

    "node" is the node id from the last line read for that name. "hist"
    is the list of node ids previously associated with it (in file order).
    All node ids are binary, not hex.
    '''
    hist, nodelines = _readtaghist(ui, repo, lines, fn, recode=recode,
                                   calcnodelines=calcnodelines)
    # util.sortdict().__setitem__ is much slower at replacing than inserting
    # new entries. The difference can matter if there are thousands of tags.
    # Build a brand new sortdict to sidestep that penalty.
    result = util.sortdict()
    for name, nodes in hist.items():
        result[name] = (nodes[-1], nodes[:-1])
    return result
229 229
230 230 def _updatetags(filetags, tagtype, alltags, tagtypes):
231 231 '''Incorporate the tag info read from one file into the two
232 232 dictionaries, alltags and tagtypes, that contain all tag
233 233 info (global across all heads plus local).'''
234 234
235 235 for name, nodehist in filetags.iteritems():
236 236 if name not in alltags:
237 237 alltags[name] = nodehist
238 238 tagtypes[name] = tagtype
239 239 continue
240 240
241 241 # we prefer alltags[name] if:
242 242 # it supersedes us OR
243 243 # mutual supersedes and it has a higher rank
244 244 # otherwise we win because we're tip-most
245 245 anode, ahist = nodehist
246 246 bnode, bhist = alltags[name]
247 247 if (bnode != anode and anode in bhist and
248 248 (bnode not in ahist or len(bhist) > len(ahist))):
249 249 anode = bnode
250 250 else:
251 251 tagtypes[name] = tagtype
252 252 ahist.extend([n for n in bhist if n not in ahist])
253 253 alltags[name] = anode, ahist
254 254
255 255 def _filename(repo):
256 256 """name of a tagcache file for a given repo or repoview"""
257 257 filename = 'cache/tags2'
258 258 if repo.filtername:
259 259 filename = '%s-%s' % (filename, repo.filtername)
260 260 return filename
261 261
def _readtagcache(ui, repo):
    '''Read the tag cache.

    Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).

    If the cache is completely up-to-date, "cachetags" is a dict of the
    form returned by _readtags() and "heads", "fnodes", and "validinfo" are
    None and "shouldwrite" is False.

    If the cache is not up to date, "cachetags" is None. "heads" is a list
    of all heads currently in the repository, ordered from tip to oldest.
    "validinfo" is a tuple describing cache validation info. This is used
    when writing the tags cache. "fnodes" is a mapping from head to .hgtags
    filenode. "shouldwrite" is True.

    If the cache is not up to date, the caller is responsible for reading tag
    info from each returned head. (See findglobaltags().)
    '''
    try:
        cachefile = repo.vfs(_filename(repo), 'r')
        # force reading the file for static-http
        cachelines = iter(cachefile)
    except IOError:
        cachefile = None

    cacherev = None
    cachenode = None
    cachehash = None
    if cachefile:
        try:
            fields = next(cachelines).split()
            cacherev = int(fields[0])
            cachenode = bin(fields[1])
            if len(fields) > 2:
                cachehash = bin(fields[2])
        except Exception:
            # corruption of the cache, just recompute it.
            pass

    tipnode = repo.changelog.tip()
    tiprev = len(repo.changelog) - 1

    # Case 1 (common): tip is the same, so nothing has changed.
    # (Unchanged tip trivially means no changesets have been added.
    # But, thanks to localrepository.destroyed(), it also means none
    # have been destroyed by strip or rollback.)
    if (cacherev == tiprev
            and cachenode == tipnode
            and cachehash == scmutil.filteredhash(repo, tiprev)):
        tags = _readtags(ui, repo, cachelines, cachefile.name)
        cachefile.close()
        return (None, None, None, tags, False)
    if cachefile:
        cachefile.close()  # ignore rest of file

    valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))

    repoheads = repo.heads()
    # Case 2 (uncommon): empty repo; get out quickly and don't bother
    # writing an empty cache.
    if repoheads == [nullid]:
        return ([], {}, valid, {}, False)

    # Case 3 (uncommon): cache file missing or empty.

    # Case 4 (uncommon): tip rev decreased. This should only happen
    # when we're called from localrepository.destroyed(). Refresh the
    # cache so future invocations will not see disappeared heads in the
    # cache.

    # Case 5 (common): tip has changed, so we've added/replaced heads.

    # As it happens, the code to handle cases 3, 4, 5 is the same.

    # N.B. in case 4 (nodes destroyed), "new head" really means "newly
    # exposed".
    if not len(repo.file('.hgtags')):
        # No tags have ever been committed, so we can avoid a
        # potentially expensive search.
        return ([], {}, valid, None, True)

    # Now we have to lookup the .hgtags filenode for every new head.
    # This is the most expensive part of finding tags, so performance
    # depends primarily on the size of newheads. Worst case: no cache
    # file, so newheads == repoheads.
    cachefnode = _getfnodes(ui, repo, repoheads)

    # Caller has to iterate over all heads, but can use the filenodes in
    # cachefnode to get to each .hgtags revision quickly.
    return (repoheads, cachefnode, valid, None, True)
354
def _getfnodes(ui, repo, nodes):
    """return .hgtags fnodes for a list of changeset nodes

    Return value is a {node: fnode} mapping. There will be no entry for nodes
    without a '.hgtags' file.
    """
    starttime = util.timer()
    # The fnodes cache is always maintained on the unfiltered repo.
    fnodescache = hgtagsfnodescache(repo.unfiltered())
    cachefnode = {}
    for node in reversed(nodes):
        fnode = fnodescache.getfnode(node)
        # nullid marks "no .hgtags file at this revision" — skip those.
        if fnode != nullid:
            cachefnode[node] = fnode

    fnodescache.write()

    duration = util.timer() - starttime
    ui.log('tagscache',
           '%d/%d cache hits/lookups in %0.4f '
           'seconds\n',
           fnodescache.hitcount, fnodescache.lookupcount, duration)
    return cachefnode
368 377
def _writetagcache(ui, repo, valid, cachetags):
    """Write the tags cache file for repo (best-effort; failures ignored)."""
    filename = _filename(repo)
    try:
        cachefile = repo.vfs(filename, 'w', atomictemp=True)
    except (OSError, IOError):
        # Cache writing is opportunistic; a read-only repo is fine.
        return

    ui.log('tagscache', 'writing .hg/%s with %d tags\n',
           filename, len(cachetags))

    # Validation line: "<tiprev> <tipnode> [<filteredhash>]".
    if valid[2]:
        cachefile.write('%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2])))
    else:
        cachefile.write('%d %s\n' % (valid[0], hex(valid[1])))

    # Tag names in the cache are in UTF-8 -- which is the whole reason
    # we keep them in UTF-8 throughout this module. If we converted
    # them local encoding on input, we would lose info writing them to
    # the cache.
    for (name, (node, hist)) in sorted(cachetags.iteritems()):
        for n in hist:
            cachefile.write("%s %s\n" % (hex(n), name))
        cachefile.write("%s %s\n" % (hex(node), name))

    try:
        cachefile.close()
    except (OSError, IOError):
        pass
397 406
def tag(repo, names, node, message, local, user, date, editor=False):
    '''tag a revision with one or more symbolic names.

    names is a list of strings or, when adding a single tag, names may be a
    string.

    if local is True, the tags are stored in a per-repository file.
    otherwise, they are stored in the .hgtags file, and a new
    changeset is committed with the change.

    keyword arguments:

    local: whether to store tags in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing'''

    if not local:
        # Refuse to commit a tag while .hgtags has uncommitted changes.
        m = matchmod.exact(repo.root, '', ['.hgtags'])
        if any(repo.status(match=m, unknown=True, ignored=True)):
            raise error.Abort(_('working copy of .hgtags is changed'),
                              hint=_('please commit .hgtags manually'))

    repo.tags()  # instantiate the cache
    _tag(repo.unfiltered(), names, node, message, local, user, date,
         editor=editor)
428 437
def _tag(repo, names, node, message, local, user, date, extra=None,
         editor=False):
    """Worker for tag(): write tag entries and optionally commit .hgtags."""
    if isinstance(names, str):
        names = (names,)

    branches = repo.branchmap()
    for name in names:
        repo.hook('pretag', throw=True, node=hex(node), tag=name,
                  local=local)
        if name in branches:
            repo.ui.warn(_("warning: tag %s conflicts with existing"
                           " branch name\n") % name)

    def writetags(fp, names, munge, prevtags):
        # Append entries at the end of the file, ensuring a trailing newline
        # separates them from any existing content.
        fp.seek(0, 2)
        if prevtags and prevtags[-1] != '\n':
            fp.write('\n')
        for name in names:
            munged = munge(name) if munge else name

            if (repo._tagscache.tagtypes and
                name in repo._tagscache.tagtypes):
                # Tag already exists: record its old node first so history
                # in the file stays complete.
                old = repo.tags().get(name, nullid)
                fp.write('%s %s\n' % (hex(old), munged))
            fp.write('%s %s\n' % (hex(node), munged))
        fp.close()

    prevtags = ''
    if local:
        try:
            fp = repo.vfs('localtags', 'r+')
        except IOError:
            fp = repo.vfs('localtags', 'a')
        else:
            prevtags = fp.read()

        # local tags are stored in the current charset
        writetags(fp, names, None, prevtags)
        for name in names:
            repo.hook('tag', node=hex(node), tag=name, local=local)
        return

    try:
        fp = repo.wvfs('.hgtags', 'rb+')
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        fp = repo.wvfs('.hgtags', 'ab')
    else:
        prevtags = fp.read()

    # committed tags are stored in UTF-8
    writetags(fp, names, encoding.fromlocal, prevtags)

    fp.close()

    repo.invalidatecaches()

    if '.hgtags' not in repo.dirstate:
        repo[None].add(['.hgtags'])

    tagmatch = matchmod.exact(repo.root, '', ['.hgtags'])
    tagnode = repo.commit(message, user, date, extra=extra, match=tagmatch,
                          editor=editor)

    for name in names:
        repo.hook('tag', node=hex(node), tag=name, local=local)

    return tagnode
501 510
502 511 _fnodescachefile = 'cache/hgtagsfnodes1'
503 512 _fnodesrecsize = 4 + 20 # changeset fragment + filenode
504 513 _fnodesmissingrec = '\xff' * 24
505 514
class hgtagsfnodescache(object):
    """Persistent cache mapping revisions to .hgtags filenodes.

    The cache is an array of records. Each item in the array corresponds to
    a changelog revision. Values in the array contain the first 4 bytes of
    the node hash and the 20 bytes .hgtags filenode for that revision.

    The first 4 bytes are present as a form of verification. Repository
    stripping and rewriting may change the node at a numeric revision in the
    changelog. The changeset fragment serves as a verifier to detect
    rewriting. This logic is shared with the rev branch cache (see
    branchmap.py).

    The instance holds in memory the full cache content but entries are
    only parsed on read.

    Instances behave like lists. ``c[i]`` works where i is a rev or
    changeset node. Missing indexes are populated automatically on access.
    """
    def __init__(self, repo):
        # The cache is keyed by unfiltered revision numbers.
        assert repo.filtername is None

        self._repo = repo

        # Hit/lookup counters, only for reporting purposes.
        self.lookupcount = 0
        self.hitcount = 0

        try:
            rawdata = repo.vfs.read(_fnodescachefile)
        except (OSError, IOError):
            rawdata = ""
        self._raw = bytearray(rawdata)

        # The end state of self._raw is an array that is of the exact length
        # required to hold a record for every revision in the repository.
        # We truncate or extend the array as necessary. self._dirtyoffset is
        # defined to be the start offset at which we need to write the output
        # file. This offset is also adjusted when new entries are calculated
        # for array members.
        wantedlen = len(repo.changelog) * _fnodesrecsize
        rawlen = len(self._raw)

        self._dirtyoffset = None

        if rawlen < wantedlen:
            # Pad with "missing" records up to the wanted size.
            self._dirtyoffset = rawlen
            self._raw.extend('\xff' * (wantedlen - rawlen))
        elif rawlen > wantedlen:
            # There's no easy way to truncate array instances. This seems
            # slightly less evil than copying a potentially large array slice.
            for _unused in range(rawlen - wantedlen):
                self._raw.pop()
            self._dirtyoffset = len(self._raw)

    def getfnode(self, node, computemissing=True):
        """Obtain the filenode of the .hgtags file at a specified revision.

        If the value is in the cache, the entry will be validated and returned.
        Otherwise, the filenode will be computed and returned unless
        "computemissing" is False, in which case None will be returned without
        any potentially expensive computation being performed.

        If an .hgtags does not exist at the specified revision, nullid is
        returned.
        """
        ctx = self._repo[node]
        rev = ctx.rev()

        self.lookupcount += 1

        offset = rev * _fnodesrecsize
        record = '%s' % self._raw[offset:offset + _fnodesrecsize]
        properprefix = node[0:4]

        # Validate and return an existing entry: the stored 4-byte node
        # fragment must match the current node at this revision.
        if record != _fnodesmissingrec:
            if record[0:4] == properprefix:
                self.hitcount += 1
                return record[4:]
            # Fall through: stale entry (revision was rewritten).

        # If we get here, the entry is either missing or invalid.

        if not computemissing:
            return None

        # Populate missing entry.
        try:
            fnode = ctx.filenode('.hgtags')
        except error.LookupError:
            # No .hgtags file on this revision.
            fnode = nullid

        self._writeentry(offset, properprefix, fnode)
        return fnode

    def setfnode(self, node, fnode):
        """Set the .hgtags filenode for a given changeset."""
        assert len(fnode) == 20
        ctx = self._repo[node]

        # Do a lookup first to avoid writing if nothing has changed.
        if self.getfnode(ctx.node(), computemissing=False) == fnode:
            return

        self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)

    def _writeentry(self, offset, prefix, fnode):
        """Store one record in memory and mark the array dirty from there."""
        # Slices on array instances only accept other array.
        entry = bytearray(prefix + fnode)
        self._raw[offset:offset + _fnodesrecsize] = entry
        # self._dirtyoffset could be None.
        self._dirtyoffset = min(self._dirtyoffset, offset) or 0

    def write(self):
        """Perform all necessary writes to cache file.

        This may no-op if no writes are needed or if a write lock could
        not be obtained.
        """
        if self._dirtyoffset is None:
            return

        data = self._raw[self._dirtyoffset:]
        if not data:
            return

        repo = self._repo

        try:
            lock = repo.wlock(wait=False)
        except error.LockError:
            # Best-effort cache: skip the write rather than block.
            repo.ui.log('tagscache',
                        'not writing .hg/%s because lock cannot be acquired\n' %
                        (_fnodescachefile))
            return

        try:
            f = repo.vfs.open(_fnodescachefile, 'ab')
            try:
                # if the file has been truncated
                actualoffset = f.tell()
                if actualoffset < self._dirtyoffset:
                    self._dirtyoffset = actualoffset
                    data = self._raw[self._dirtyoffset:]
                f.seek(self._dirtyoffset)
                f.truncate()
                repo.ui.log('tagscache',
                            'writing %d bytes to %s\n' % (
                            len(data), _fnodescachefile))
                f.write(data)
                self._dirtyoffset = None
            finally:
                f.close()
        except (IOError, OSError) as inst:
            repo.ui.log('tagscache',
                        "couldn't write %s: %s\n" % (
                        _fnodescachefile, inst))
        finally:
            lock.release()
General Comments 0
You need to be logged in to leave comments. Login now