tags: do not feed dictionaries to 'findglobaltags'...
Pierre-Yves David
r31706:63d4deda default
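For context, the hunk below updates localrepo._findtags to a findglobaltags API that builds and returns both tag mappings instead of filling dictionaries supplied by the caller. A minimal, self-contained sketch of that calling convention, with findglobaltags as a hypothetical stand-in (the tags.py side of the changeset is not part of this diff) and ui/repo as ignored placeholders:

    # hypothetical stand-in illustrating the new calling convention
    def findglobaltags(ui, repo):
        """Build and return (alltags, tagtypes) rather than mutating
        dictionaries passed in by the caller."""
        alltags = {}   # map tag name to (node, hist)
        tagtypes = {}  # map tag name to tag type ('global')
        # the real implementation would read .hgtags from repository heads here
        return alltags, tagtypes

    # caller side, matching what _findtags now does in the hunk below
    globaldata = findglobaltags(ui=None, repo=None)
    alltags = globaldata[0]
    tagtypes = globaldata[1]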
@@ -1,1984 +1,1985 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 color,
32 32 context,
33 33 dirstate,
34 34 dirstateguard,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repoview,
54 54 revset,
55 55 revsetlang,
56 56 scmutil,
57 57 store,
58 58 subrepo,
59 59 tags as tagsmod,
60 60 transaction,
61 61 txnutil,
62 62 util,
63 63 vfs as vfsmod,
64 64 )
65 65
66 66 release = lockmod.release
67 67 urlerr = util.urlerr
68 68 urlreq = util.urlreq
69 69
70 70 class repofilecache(scmutil.filecache):
71 71 """All filecache usage on repo is done for logic that should be unfiltered
72 72 """
73 73
74 74 def join(self, obj, fname):
75 75 return obj.vfs.join(fname)
76 76 def __get__(self, repo, type=None):
77 77 if repo is None:
78 78 return self
79 79 return super(repofilecache, self).__get__(repo.unfiltered(), type)
80 80 def __set__(self, repo, value):
81 81 return super(repofilecache, self).__set__(repo.unfiltered(), value)
82 82 def __delete__(self, repo):
83 83 return super(repofilecache, self).__delete__(repo.unfiltered())
84 84
85 85 class storecache(repofilecache):
86 86 """filecache for files in the store"""
87 87 def join(self, obj, fname):
88 88 return obj.sjoin(fname)
89 89
90 90 class unfilteredpropertycache(util.propertycache):
91 91 """propertycache that applies to the unfiltered repo only"""
92 92
93 93 def __get__(self, repo, type=None):
94 94 unfi = repo.unfiltered()
95 95 if unfi is repo:
96 96 return super(unfilteredpropertycache, self).__get__(unfi)
97 97 return getattr(unfi, self.name)
98 98
99 99 class filteredpropertycache(util.propertycache):
100 100 """propertycache that must take filtering into account"""
101 101
102 102 def cachevalue(self, obj, value):
103 103 object.__setattr__(obj, self.name, value)
104 104
105 105
106 106 def hasunfilteredcache(repo, name):
107 107 """check if a repo has an unfilteredpropertycache value for <name>"""
108 108 return name in vars(repo.unfiltered())
109 109
110 110 def unfilteredmethod(orig):
111 111 """decorate a method that always needs to run on the unfiltered version"""
112 112 def wrapper(repo, *args, **kwargs):
113 113 return orig(repo.unfiltered(), *args, **kwargs)
114 114 return wrapper
115 115
116 116 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
117 117 'unbundle'))
118 118 legacycaps = moderncaps.union(set(['changegroupsubset']))
119 119
120 120 class localpeer(peer.peerrepository):
121 121 '''peer for a local repo; reflects only the most recent API'''
122 122
123 123 def __init__(self, repo, caps=None):
124 124 if caps is None:
125 125 caps = moderncaps.copy()
126 126 peer.peerrepository.__init__(self)
127 127 self._repo = repo.filtered('served')
128 128 self.ui = repo.ui
129 129 self._caps = repo._restrictcapabilities(caps)
130 130 self.requirements = repo.requirements
131 131 self.supportedformats = repo.supportedformats
132 132
133 133 def close(self):
134 134 self._repo.close()
135 135
136 136 def _capabilities(self):
137 137 return self._caps
138 138
139 139 def local(self):
140 140 return self._repo
141 141
142 142 def canpush(self):
143 143 return True
144 144
145 145 def url(self):
146 146 return self._repo.url()
147 147
148 148 def lookup(self, key):
149 149 return self._repo.lookup(key)
150 150
151 151 def branchmap(self):
152 152 return self._repo.branchmap()
153 153
154 154 def heads(self):
155 155 return self._repo.heads()
156 156
157 157 def known(self, nodes):
158 158 return self._repo.known(nodes)
159 159
160 160 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
161 161 **kwargs):
162 162 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
163 163 common=common, bundlecaps=bundlecaps,
164 164 **kwargs)
165 165 cb = util.chunkbuffer(chunks)
166 166
167 167 if bundlecaps is not None and 'HG20' in bundlecaps:
168 168 # When requesting a bundle2, getbundle returns a stream to make the
169 169 # wire level function happier. We need to build a proper object
170 170 # from it in local peer.
171 171 return bundle2.getunbundler(self.ui, cb)
172 172 else:
173 173 return changegroup.getunbundler('01', cb, None)
174 174
175 175 # TODO We might want to move the next two calls into legacypeer and add
176 176 # unbundle instead.
177 177
178 178 def unbundle(self, cg, heads, url):
179 179 """apply a bundle on a repo
180 180
181 181 This function handles the repo locking itself."""
182 182 try:
183 183 try:
184 184 cg = exchange.readbundle(self.ui, cg, None)
185 185 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
186 186 if util.safehasattr(ret, 'getchunks'):
187 187 # This is a bundle20 object, turn it into an unbundler.
188 188 # This little dance should be dropped eventually when the
189 189 # API is finally improved.
190 190 stream = util.chunkbuffer(ret.getchunks())
191 191 ret = bundle2.getunbundler(self.ui, stream)
192 192 return ret
193 193 except Exception as exc:
194 194 # If the exception contains output salvaged from a bundle2
195 195 # reply, we need to make sure it is printed before continuing
196 196 # to fail. So we build a bundle2 with such output and consume
197 197 # it directly.
198 198 #
199 199 # This is not very elegant but allows a "simple" solution for
200 200 # issue4594
201 201 output = getattr(exc, '_bundle2salvagedoutput', ())
202 202 if output:
203 203 bundler = bundle2.bundle20(self._repo.ui)
204 204 for out in output:
205 205 bundler.addpart(out)
206 206 stream = util.chunkbuffer(bundler.getchunks())
207 207 b = bundle2.getunbundler(self.ui, stream)
208 208 bundle2.processbundle(self._repo, b)
209 209 raise
210 210 except error.PushRaced as exc:
211 211 raise error.ResponseError(_('push failed:'), str(exc))
212 212
213 213 def lock(self):
214 214 return self._repo.lock()
215 215
216 216 def addchangegroup(self, cg, source, url):
217 217 return cg.apply(self._repo, source, url)
218 218
219 219 def pushkey(self, namespace, key, old, new):
220 220 return self._repo.pushkey(namespace, key, old, new)
221 221
222 222 def listkeys(self, namespace):
223 223 return self._repo.listkeys(namespace)
224 224
225 225 def debugwireargs(self, one, two, three=None, four=None, five=None):
226 226 '''used to test argument passing over the wire'''
227 227 return "%s %s %s %s %s" % (one, two, three, four, five)
228 228
229 229 class locallegacypeer(localpeer):
230 230 '''peer extension which implements legacy methods too; used for tests with
231 231 restricted capabilities'''
232 232
233 233 def __init__(self, repo):
234 234 localpeer.__init__(self, repo, caps=legacycaps)
235 235
236 236 def branches(self, nodes):
237 237 return self._repo.branches(nodes)
238 238
239 239 def between(self, pairs):
240 240 return self._repo.between(pairs)
241 241
242 242 def changegroup(self, basenodes, source):
243 243 return changegroup.changegroup(self._repo, basenodes, source)
244 244
245 245 def changegroupsubset(self, bases, heads, source):
246 246 return changegroup.changegroupsubset(self._repo, bases, heads, source)
247 247
248 248 class localrepository(object):
249 249
250 250 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
251 251 'manifestv2'))
252 252 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
253 253 'relshared', 'dotencode'))
254 254 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
255 255 filtername = None
256 256
257 257 # a list of (ui, featureset) functions.
258 258 # only functions defined in module of enabled extensions are invoked
259 259 featuresetupfuncs = set()
260 260
261 261 def __init__(self, baseui, path, create=False):
262 262 self.requirements = set()
263 263 # wvfs: rooted at the repository root, used to access the working copy
264 264 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
265 265 # vfs: rooted at .hg, used to access repo files outside of .hg/store
266 266 self.vfs = None
267 267 # svfs: usually rooted at .hg/store, used to access repository history
268 268 # If this is a shared repository, this vfs may point to another
269 269 # repository's .hg/store directory.
270 270 self.svfs = None
271 271 self.root = self.wvfs.base
272 272 self.path = self.wvfs.join(".hg")
273 273 self.origroot = path
274 274 self.auditor = pathutil.pathauditor(self.root, self._checknested)
275 275 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
276 276 realfs=False)
277 277 self.vfs = vfsmod.vfs(self.path)
278 278 self.baseui = baseui
279 279 self.ui = baseui.copy()
280 280 self.ui.copy = baseui.copy # prevent copying repo configuration
281 281 # A list of callbacks to shape the phase if no data were found.
282 282 # Callbacks are in the form: func(repo, roots) --> processed root.
283 283 # This list is to be filled by extensions during repo setup
284 284 self._phasedefaults = []
285 285 try:
286 286 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
287 287 self._loadextensions()
288 288 except IOError:
289 289 pass
290 290
291 291 if self.featuresetupfuncs:
292 292 self.supported = set(self._basesupported) # use private copy
293 293 extmods = set(m.__name__ for n, m
294 294 in extensions.extensions(self.ui))
295 295 for setupfunc in self.featuresetupfuncs:
296 296 if setupfunc.__module__ in extmods:
297 297 setupfunc(self.ui, self.supported)
298 298 else:
299 299 self.supported = self._basesupported
300 300 color.setup(self.ui)
301 301
302 302 # Add compression engines.
303 303 for name in util.compengines:
304 304 engine = util.compengines[name]
305 305 if engine.revlogheader():
306 306 self.supported.add('exp-compression-%s' % name)
307 307
308 308 if not self.vfs.isdir():
309 309 if create:
310 310 self.requirements = newreporequirements(self)
311 311
312 312 if not self.wvfs.exists():
313 313 self.wvfs.makedirs()
314 314 self.vfs.makedir(notindexed=True)
315 315
316 316 if 'store' in self.requirements:
317 317 self.vfs.mkdir("store")
318 318
319 319 # create an invalid changelog
320 320 self.vfs.append(
321 321 "00changelog.i",
322 322 '\0\0\0\2' # represents revlogv2
323 323 ' dummy changelog to prevent using the old repo layout'
324 324 )
325 325 else:
326 326 raise error.RepoError(_("repository %s not found") % path)
327 327 elif create:
328 328 raise error.RepoError(_("repository %s already exists") % path)
329 329 else:
330 330 try:
331 331 self.requirements = scmutil.readrequires(
332 332 self.vfs, self.supported)
333 333 except IOError as inst:
334 334 if inst.errno != errno.ENOENT:
335 335 raise
336 336
337 337 self.sharedpath = self.path
338 338 try:
339 339 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
340 340 if 'relshared' in self.requirements:
341 341 sharedpath = self.vfs.join(sharedpath)
342 342 vfs = vfsmod.vfs(sharedpath, realpath=True)
343 343 s = vfs.base
344 344 if not vfs.exists():
345 345 raise error.RepoError(
346 346 _('.hg/sharedpath points to nonexistent directory %s') % s)
347 347 self.sharedpath = s
348 348 except IOError as inst:
349 349 if inst.errno != errno.ENOENT:
350 350 raise
351 351
352 352 self.store = store.store(
353 353 self.requirements, self.sharedpath, vfsmod.vfs)
354 354 self.spath = self.store.path
355 355 self.svfs = self.store.vfs
356 356 self.sjoin = self.store.join
357 357 self.vfs.createmode = self.store.createmode
358 358 self._applyopenerreqs()
359 359 if create:
360 360 self._writerequirements()
361 361
362 362 self._dirstatevalidatewarned = False
363 363
364 364 self._branchcaches = {}
365 365 self._revbranchcache = None
366 366 self.filterpats = {}
367 367 self._datafilters = {}
368 368 self._transref = self._lockref = self._wlockref = None
369 369
370 370 # A cache for various files under .hg/ that tracks file changes,
371 371 # (used by the filecache decorator)
372 372 #
373 373 # Maps a property name to its util.filecacheentry
374 374 self._filecache = {}
375 375
376 376 # holds sets of revisions to be filtered
377 377 # should be cleared when something might have changed the filter value:
378 378 # - new changesets,
379 379 # - phase change,
380 380 # - new obsolescence marker,
381 381 # - working directory parent change,
382 382 # - bookmark changes
383 383 self.filteredrevcache = {}
384 384
385 385 # generic mapping between names and nodes
386 386 self.names = namespaces.namespaces()
387 387
388 388 @property
389 389 def wopener(self):
390 390 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
391 391 return self.wvfs
392 392
393 393 @property
394 394 def opener(self):
395 395 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
396 396 return self.vfs
397 397
398 398 def close(self):
399 399 self._writecaches()
400 400
401 401 def _loadextensions(self):
402 402 extensions.loadall(self.ui)
403 403
404 404 def _writecaches(self):
405 405 if self._revbranchcache:
406 406 self._revbranchcache.write()
407 407
408 408 def _restrictcapabilities(self, caps):
409 409 if self.ui.configbool('experimental', 'bundle2-advertise', True):
410 410 caps = set(caps)
411 411 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
412 412 caps.add('bundle2=' + urlreq.quote(capsblob))
413 413 return caps
414 414
415 415 def _applyopenerreqs(self):
416 416 self.svfs.options = dict((r, 1) for r in self.requirements
417 417 if r in self.openerreqs)
418 418 # experimental config: format.chunkcachesize
419 419 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
420 420 if chunkcachesize is not None:
421 421 self.svfs.options['chunkcachesize'] = chunkcachesize
422 422 # experimental config: format.maxchainlen
423 423 maxchainlen = self.ui.configint('format', 'maxchainlen')
424 424 if maxchainlen is not None:
425 425 self.svfs.options['maxchainlen'] = maxchainlen
426 426 # experimental config: format.manifestcachesize
427 427 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
428 428 if manifestcachesize is not None:
429 429 self.svfs.options['manifestcachesize'] = manifestcachesize
430 430 # experimental config: format.aggressivemergedeltas
431 431 aggressivemergedeltas = self.ui.configbool('format',
432 432 'aggressivemergedeltas', False)
433 433 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
434 434 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
435 435
436 436 for r in self.requirements:
437 437 if r.startswith('exp-compression-'):
438 438 self.svfs.options['compengine'] = r[len('exp-compression-'):]
439 439
440 440 def _writerequirements(self):
441 441 scmutil.writerequires(self.vfs, self.requirements)
442 442
443 443 def _checknested(self, path):
444 444 """Determine if path is a legal nested repository."""
445 445 if not path.startswith(self.root):
446 446 return False
447 447 subpath = path[len(self.root) + 1:]
448 448 normsubpath = util.pconvert(subpath)
449 449
450 450 # XXX: Checking against the current working copy is wrong in
451 451 # the sense that it can reject things like
452 452 #
453 453 # $ hg cat -r 10 sub/x.txt
454 454 #
455 455 # if sub/ is no longer a subrepository in the working copy
456 456 # parent revision.
457 457 #
458 458 # However, it can of course also allow things that would have
459 459 # been rejected before, such as the above cat command if sub/
460 460 # is a subrepository now, but was a normal directory before.
461 461 # The old path auditor would have rejected by mistake since it
462 462 # panics when it sees sub/.hg/.
463 463 #
464 464 # All in all, checking against the working copy seems sensible
465 465 # since we want to prevent access to nested repositories on
466 466 # the filesystem *now*.
467 467 ctx = self[None]
468 468 parts = util.splitpath(subpath)
469 469 while parts:
470 470 prefix = '/'.join(parts)
471 471 if prefix in ctx.substate:
472 472 if prefix == normsubpath:
473 473 return True
474 474 else:
475 475 sub = ctx.sub(prefix)
476 476 return sub.checknested(subpath[len(prefix) + 1:])
477 477 else:
478 478 parts.pop()
479 479 return False
480 480
481 481 def peer(self):
482 482 return localpeer(self) # not cached to avoid reference cycle
483 483
484 484 def unfiltered(self):
485 485 """Return unfiltered version of the repository
486 486
487 487 Intended to be overridden by filtered repos."""
488 488 return self
489 489
490 490 def filtered(self, name):
491 491 """Return a filtered version of a repository"""
492 492 # build a new class with the mixin and the current class
493 493 # (possibly subclass of the repo)
494 494 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
495 495 pass
496 496 return filteredrepo(self, name)
497 497
498 498 @repofilecache('bookmarks', 'bookmarks.current')
499 499 def _bookmarks(self):
500 500 return bookmarks.bmstore(self)
501 501
502 502 @property
503 503 def _activebookmark(self):
504 504 return self._bookmarks.active
505 505
506 506 def bookmarkheads(self, bookmark):
507 507 name = bookmark.split('@', 1)[0]
508 508 heads = []
509 509 for mark, n in self._bookmarks.iteritems():
510 510 if mark.split('@', 1)[0] == name:
511 511 heads.append(n)
512 512 return heads
513 513
514 514 # _phaserevs and _phasesets depend on changelog. what we need is to
515 515 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
516 516 # can't be easily expressed in filecache mechanism.
517 517 @storecache('phaseroots', '00changelog.i')
518 518 def _phasecache(self):
519 519 return phases.phasecache(self, self._phasedefaults)
520 520
521 521 @storecache('obsstore')
522 522 def obsstore(self):
523 523 # read default format for new obsstore.
524 524 # developer config: format.obsstore-version
525 525 defaultformat = self.ui.configint('format', 'obsstore-version', None)
526 526 # rely on obsstore class default when possible.
527 527 kwargs = {}
528 528 if defaultformat is not None:
529 529 kwargs['defaultformat'] = defaultformat
530 530 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
531 531 store = obsolete.obsstore(self.svfs, readonly=readonly,
532 532 **kwargs)
533 533 if store and readonly:
534 534 self.ui.warn(
535 535 _('obsolete feature not enabled but %i markers found!\n')
536 536 % len(list(store)))
537 537 return store
538 538
539 539 @storecache('00changelog.i')
540 540 def changelog(self):
541 541 c = changelog.changelog(self.svfs)
542 542 if txnutil.mayhavepending(self.root):
543 543 c.readpending('00changelog.i.a')
544 544 return c
545 545
546 546 def _constructmanifest(self):
547 547 # This is a temporary function while we migrate from manifest to
548 548 # manifestlog. It allows bundlerepo and unionrepo to intercept the
549 549 # manifest creation.
550 550 return manifest.manifestrevlog(self.svfs)
551 551
552 552 @storecache('00manifest.i')
553 553 def manifestlog(self):
554 554 return manifest.manifestlog(self.svfs, self)
555 555
556 556 @repofilecache('dirstate')
557 557 def dirstate(self):
558 558 return dirstate.dirstate(self.vfs, self.ui, self.root,
559 559 self._dirstatevalidate)
560 560
561 561 def _dirstatevalidate(self, node):
562 562 try:
563 563 self.changelog.rev(node)
564 564 return node
565 565 except error.LookupError:
566 566 if not self._dirstatevalidatewarned:
567 567 self._dirstatevalidatewarned = True
568 568 self.ui.warn(_("warning: ignoring unknown"
569 569 " working parent %s!\n") % short(node))
570 570 return nullid
571 571
572 572 def __getitem__(self, changeid):
573 573 if changeid is None or changeid == wdirrev:
574 574 return context.workingctx(self)
575 575 if isinstance(changeid, slice):
576 576 return [context.changectx(self, i)
577 577 for i in xrange(*changeid.indices(len(self)))
578 578 if i not in self.changelog.filteredrevs]
579 579 return context.changectx(self, changeid)
580 580
581 581 def __contains__(self, changeid):
582 582 try:
583 583 self[changeid]
584 584 return True
585 585 except error.RepoLookupError:
586 586 return False
587 587
588 588 def __nonzero__(self):
589 589 return True
590 590
591 591 __bool__ = __nonzero__
592 592
593 593 def __len__(self):
594 594 return len(self.changelog)
595 595
596 596 def __iter__(self):
597 597 return iter(self.changelog)
598 598
599 599 def revs(self, expr, *args):
600 600 '''Find revisions matching a revset.
601 601
602 602 The revset is specified as a string ``expr`` that may contain
603 603 %-formatting to escape certain types. See ``revsetlang.formatspec``.
604 604
605 605 Revset aliases from the configuration are not expanded. To expand
606 606 user aliases, consider calling ``scmutil.revrange()`` or
607 607 ``repo.anyrevs([expr], user=True)``.
608 608
609 609 Returns a revset.abstractsmartset, which is a list-like interface
610 610 that contains integer revisions.
611 611 '''
612 612 expr = revsetlang.formatspec(expr, *args)
613 613 m = revset.match(None, expr)
614 614 return m(self)
615 615
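A minimal illustration of the %-formatting described in the revs() docstring above, using revsetlang.formatspec directly so it runs without a repository; the 'default' and 42 arguments are arbitrary examples, and the snippet assumes the mercurial package is importable:

    from mercurial import revsetlang

    # '%s' escapes a string argument, '%d' an integer revision number
    spec = revsetlang.formatspec('branch(%s) and ancestors(%d)', 'default', 42)
    print(spec)  # a safely quoted revset string, ready for repo.revs()

    # with a repository object the same spec can be evaluated directly:
    # repo.revs('branch(%s) and ancestors(%d)', 'default', 42)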
616 616 def set(self, expr, *args):
617 617 '''Find revisions matching a revset and emit changectx instances.
618 618
619 619 This is a convenience wrapper around ``revs()`` that iterates the
620 620 result and is a generator of changectx instances.
621 621
622 622 Revset aliases from the configuration are not expanded. To expand
623 623 user aliases, consider calling ``scmutil.revrange()``.
624 624 '''
625 625 for r in self.revs(expr, *args):
626 626 yield self[r]
627 627
628 628 def anyrevs(self, specs, user=False):
629 629 '''Find revisions matching one of the given revsets.
630 630
631 631 Revset aliases from the configuration are not expanded by default. To
632 632 expand user aliases, specify ``user=True``.
633 633 '''
634 634 if user:
635 635 m = revset.matchany(self.ui, specs, repo=self)
636 636 else:
637 637 m = revset.matchany(None, specs)
638 638 return m(self)
639 639
640 640 def url(self):
641 641 return 'file:' + self.root
642 642
643 643 def hook(self, name, throw=False, **args):
644 644 """Call a hook, passing this repo instance.
645 645
646 646 This is a convenience method to aid invoking hooks. Extensions likely
647 647 won't call this unless they have registered a custom hook or are
648 648 replacing code that is expected to call a hook.
649 649 """
650 650 return hook.hook(self.ui, self, name, throw, **args)
651 651
652 652 def tag(self, names, node, message, local, user, date, editor=False):
653 653 self.ui.deprecwarn("use 'tagsmod.tag' instead of 'repo.tag'", '4.2')
654 654 tagsmod.tag(self, names, node, message, local, user, date,
655 655 editor=editor)
656 656
657 657 @filteredpropertycache
658 658 def _tagscache(self):
659 659 '''Returns a tagscache object that contains various tags related
660 660 caches.'''
661 661
662 662 # This simplifies its cache management by having one decorated
663 663 # function (this one) and the rest simply fetch things from it.
664 664 class tagscache(object):
665 665 def __init__(self):
666 666 # These two define the set of tags for this repository. tags
667 667 # maps tag name to node; tagtypes maps tag name to 'global' or
668 668 # 'local'. (Global tags are defined by .hgtags across all
669 669 # heads, and local tags are defined in .hg/localtags.)
670 670 # They constitute the in-memory cache of tags.
671 671 self.tags = self.tagtypes = None
672 672
673 673 self.nodetagscache = self.tagslist = None
674 674
675 675 cache = tagscache()
676 676 cache.tags, cache.tagtypes = self._findtags()
677 677
678 678 return cache
679 679
680 680 def tags(self):
681 681 '''return a mapping of tag to node'''
682 682 t = {}
683 683 if self.changelog.filteredrevs:
684 684 tags, tt = self._findtags()
685 685 else:
686 686 tags = self._tagscache.tags
687 687 for k, v in tags.iteritems():
688 688 try:
689 689 # ignore tags to unknown nodes
690 690 self.changelog.rev(v)
691 691 t[k] = v
692 692 except (error.LookupError, ValueError):
693 693 pass
694 694 return t
695 695
696 696 def _findtags(self):
697 697 '''Do the hard work of finding tags. Return a pair of dicts
698 698 (tags, tagtypes) where tags maps tag name to node, and tagtypes
699 699 maps tag name to a string like \'global\' or \'local\'.
700 700 Subclasses or extensions are free to add their own tags, but
701 701 should be aware that the returned dicts will be retained for the
702 702 duration of the localrepo object.'''
703 703
704 704 # XXX what tagtype should subclasses/extensions use? Currently
705 705 # mq and bookmarks add tags, but do not set the tagtype at all.
706 706 # Should each extension invent its own tag type? Should there
707 707 # be one tagtype for all such "virtual" tags? Or is the status
708 708 # quo fine?
709 709
710 alltags = {} # map tag name to (node, hist)
711 tagtypes = {}
712 710
713 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
711 globaldata = tagsmod.findglobaltags(self.ui, self)
712 alltags = globaldata[0] # map tag name to (node, hist)
713 tagtypes = globaldata[1] # map tag name to tag type
714
714 715 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
715 716
716 717 # Build the return dicts. Have to re-encode tag names because
717 718 # the tags module always uses UTF-8 (in order not to lose info
718 719 # writing to the cache), but the rest of Mercurial wants them in
719 720 # local encoding.
720 721 tags = {}
721 722 for (name, (node, hist)) in alltags.iteritems():
722 723 if node != nullid:
723 724 tags[encoding.tolocal(name)] = node
724 725 tags['tip'] = self.changelog.tip()
725 726 tagtypes = dict([(encoding.tolocal(name), value)
726 727 for (name, value) in tagtypes.iteritems()])
727 728 return (tags, tagtypes)
728 729
729 730 def tagtype(self, tagname):
730 731 '''
731 732 return the type of the given tag. result can be:
732 733
733 734 'local' : a local tag
734 735 'global' : a global tag
735 736 None : tag does not exist
736 737 '''
737 738
738 739 return self._tagscache.tagtypes.get(tagname)
739 740
740 741 def tagslist(self):
741 742 '''return a list of tags ordered by revision'''
742 743 if not self._tagscache.tagslist:
743 744 l = []
744 745 for t, n in self.tags().iteritems():
745 746 l.append((self.changelog.rev(n), t, n))
746 747 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
747 748
748 749 return self._tagscache.tagslist
749 750
750 751 def nodetags(self, node):
751 752 '''return the tags associated with a node'''
752 753 if not self._tagscache.nodetagscache:
753 754 nodetagscache = {}
754 755 for t, n in self._tagscache.tags.iteritems():
755 756 nodetagscache.setdefault(n, []).append(t)
756 757 for tags in nodetagscache.itervalues():
757 758 tags.sort()
758 759 self._tagscache.nodetagscache = nodetagscache
759 760 return self._tagscache.nodetagscache.get(node, [])
760 761
761 762 def nodebookmarks(self, node):
762 763 """return the list of bookmarks pointing to the specified node"""
763 764 marks = []
764 765 for bookmark, n in self._bookmarks.iteritems():
765 766 if n == node:
766 767 marks.append(bookmark)
767 768 return sorted(marks)
768 769
769 770 def branchmap(self):
770 771 '''returns a dictionary {branch: [branchheads]} with branchheads
771 772 ordered by increasing revision number'''
772 773 branchmap.updatecache(self)
773 774 return self._branchcaches[self.filtername]
774 775
775 776 @unfilteredmethod
776 777 def revbranchcache(self):
777 778 if not self._revbranchcache:
778 779 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
779 780 return self._revbranchcache
780 781
781 782 def branchtip(self, branch, ignoremissing=False):
782 783 '''return the tip node for a given branch
783 784
784 785 If ignoremissing is True, then this method will not raise an error.
785 786 This is helpful for callers that only expect None for a missing branch
786 787 (e.g. namespace).
787 788
788 789 '''
789 790 try:
790 791 return self.branchmap().branchtip(branch)
791 792 except KeyError:
792 793 if not ignoremissing:
793 794 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
794 795 else:
795 796 pass
796 797
797 798 def lookup(self, key):
798 799 return self[key].node()
799 800
800 801 def lookupbranch(self, key, remote=None):
801 802 repo = remote or self
802 803 if key in repo.branchmap():
803 804 return key
804 805
805 806 repo = (remote and remote.local()) and remote or self
806 807 return repo[key].branch()
807 808
808 809 def known(self, nodes):
809 810 cl = self.changelog
810 811 nm = cl.nodemap
811 812 filtered = cl.filteredrevs
812 813 result = []
813 814 for n in nodes:
814 815 r = nm.get(n)
815 816 resp = not (r is None or r in filtered)
816 817 result.append(resp)
817 818 return result
818 819
819 820 def local(self):
820 821 return self
821 822
822 823 def publishing(self):
823 824 # it's safe (and desirable) to trust the publish flag unconditionally
824 825 # so that we don't finalize changes shared between users via ssh or nfs
825 826 return self.ui.configbool('phases', 'publish', True, untrusted=True)
826 827
827 828 def cancopy(self):
828 829 # so statichttprepo's override of local() works
829 830 if not self.local():
830 831 return False
831 832 if not self.publishing():
832 833 return True
833 834 # if publishing we can't copy if there is filtered content
834 835 return not self.filtered('visible').changelog.filteredrevs
835 836
836 837 def shared(self):
837 838 '''the type of shared repository (None if not shared)'''
838 839 if self.sharedpath != self.path:
839 840 return 'store'
840 841 return None
841 842
842 843 def join(self, f, *insidef):
843 844 self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.0')
844 845 return self.vfs.join(os.path.join(f, *insidef))
845 846
846 847 def wjoin(self, f, *insidef):
847 848 return self.vfs.reljoin(self.root, f, *insidef)
848 849
849 850 def file(self, f):
850 851 if f[0] == '/':
851 852 f = f[1:]
852 853 return filelog.filelog(self.svfs, f)
853 854
854 855 def changectx(self, changeid):
855 856 return self[changeid]
856 857
857 858 def setparents(self, p1, p2=nullid):
858 859 self.dirstate.beginparentchange()
859 860 copies = self.dirstate.setparents(p1, p2)
860 861 pctx = self[p1]
861 862 if copies:
862 863 # Adjust copy records; the dirstate cannot do it, as it
863 864 # requires access to the parents' manifests. Preserve them
864 865 # only for entries added to the first parent.
865 866 for f in copies:
866 867 if f not in pctx and copies[f] in pctx:
867 868 self.dirstate.copy(copies[f], f)
868 869 if p2 == nullid:
869 870 for f, s in sorted(self.dirstate.copies().items()):
870 871 if f not in pctx and s not in pctx:
871 872 self.dirstate.copy(None, f)
872 873 self.dirstate.endparentchange()
873 874
874 875 def filectx(self, path, changeid=None, fileid=None):
875 876 """changeid can be a changeset revision, node, or tag.
876 877 fileid can be a file revision or node."""
877 878 return context.filectx(self, path, changeid, fileid)
878 879
879 880 def getcwd(self):
880 881 return self.dirstate.getcwd()
881 882
882 883 def pathto(self, f, cwd=None):
883 884 return self.dirstate.pathto(f, cwd)
884 885
885 886 def wfile(self, f, mode='r'):
886 887 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wfile'", '4.2')
887 888 return self.wvfs(f, mode)
888 889
889 890 def _link(self, f):
890 891 self.ui.deprecwarn("use 'repo.wvfs.islink' instead of 'repo._link'",
891 892 '4.0')
892 893 return self.wvfs.islink(f)
893 894
894 895 def _loadfilter(self, filter):
895 896 if filter not in self.filterpats:
896 897 l = []
897 898 for pat, cmd in self.ui.configitems(filter):
898 899 if cmd == '!':
899 900 continue
900 901 mf = matchmod.match(self.root, '', [pat])
901 902 fn = None
902 903 params = cmd
903 904 for name, filterfn in self._datafilters.iteritems():
904 905 if cmd.startswith(name):
905 906 fn = filterfn
906 907 params = cmd[len(name):].lstrip()
907 908 break
908 909 if not fn:
909 910 fn = lambda s, c, **kwargs: util.filter(s, c)
910 911 # Wrap old filters not supporting keyword arguments
911 912 if not inspect.getargspec(fn)[2]:
912 913 oldfn = fn
913 914 fn = lambda s, c, **kwargs: oldfn(s, c)
914 915 l.append((mf, fn, params))
915 916 self.filterpats[filter] = l
916 917 return self.filterpats[filter]
917 918
918 919 def _filter(self, filterpats, filename, data):
919 920 for mf, fn, cmd in filterpats:
920 921 if mf(filename):
921 922 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
922 923 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
923 924 break
924 925
925 926 return data
926 927
927 928 @unfilteredpropertycache
928 929 def _encodefilterpats(self):
929 930 return self._loadfilter('encode')
930 931
931 932 @unfilteredpropertycache
932 933 def _decodefilterpats(self):
933 934 return self._loadfilter('decode')
934 935
935 936 def adddatafilter(self, name, filter):
936 937 self._datafilters[name] = filter
937 938
938 939 def wread(self, filename):
939 940 if self.wvfs.islink(filename):
940 941 data = self.wvfs.readlink(filename)
941 942 else:
942 943 data = self.wvfs.read(filename)
943 944 return self._filter(self._encodefilterpats, filename, data)
944 945
945 946 def wwrite(self, filename, data, flags, backgroundclose=False):
946 947 """write ``data`` into ``filename`` in the working directory
947 948
948 949 This returns the length of the written (maybe decoded) data.
949 950 """
950 951 data = self._filter(self._decodefilterpats, filename, data)
951 952 if 'l' in flags:
952 953 self.wvfs.symlink(data, filename)
953 954 else:
954 955 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
955 956 if 'x' in flags:
956 957 self.wvfs.setflags(filename, False, True)
957 958 return len(data)
958 959
959 960 def wwritedata(self, filename, data):
960 961 return self._filter(self._decodefilterpats, filename, data)
961 962
962 963 def currenttransaction(self):
963 964 """return the current transaction or None if none exists"""
964 965 if self._transref:
965 966 tr = self._transref()
966 967 else:
967 968 tr = None
968 969
969 970 if tr and tr.running():
970 971 return tr
971 972 return None
972 973
973 974 def transaction(self, desc, report=None):
974 975 if (self.ui.configbool('devel', 'all-warnings')
975 976 or self.ui.configbool('devel', 'check-locks')):
976 977 if self._currentlock(self._lockref) is None:
977 978 raise error.ProgrammingError('transaction requires locking')
978 979 tr = self.currenttransaction()
979 980 if tr is not None:
980 981 return tr.nest()
981 982
982 983 # abort here if the journal already exists
983 984 if self.svfs.exists("journal"):
984 985 raise error.RepoError(
985 986 _("abandoned transaction found"),
986 987 hint=_("run 'hg recover' to clean up transaction"))
987 988
988 989 idbase = "%.40f#%f" % (random.random(), time.time())
989 990 ha = hex(hashlib.sha1(idbase).digest())
990 991 txnid = 'TXN:' + ha
991 992 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
992 993
993 994 self._writejournal(desc)
994 995 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
995 996 if report:
996 997 rp = report
997 998 else:
998 999 rp = self.ui.warn
999 1000 vfsmap = {'plain': self.vfs} # root of .hg/
1000 1001 # we must avoid cyclic reference between repo and transaction.
1001 1002 reporef = weakref.ref(self)
1002 1003 def validate(tr):
1003 1004 """will run pre-closing hooks"""
1004 1005 reporef().hook('pretxnclose', throw=True,
1005 1006 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1006 1007 def releasefn(tr, success):
1007 1008 repo = reporef()
1008 1009 if success:
1009 1010 # this should be explicitly invoked here, because
1010 1011 # in-memory changes aren't written out when the
1011 1012 # transaction closes, if tr.addfilegenerator (via
1012 1013 # dirstate.write or so) isn't invoked while the
1013 1014 # transaction is running
1014 1015 repo.dirstate.write(None)
1015 1016 else:
1016 1017 # discard all changes (including ones already written
1017 1018 # out) in this transaction
1018 1019 repo.dirstate.restorebackup(None, prefix='journal.')
1019 1020
1020 1021 repo.invalidate(clearfilecache=True)
1021 1022
1022 1023 tr = transaction.transaction(rp, self.svfs, vfsmap,
1023 1024 "journal",
1024 1025 "undo",
1025 1026 aftertrans(renames),
1026 1027 self.store.createmode,
1027 1028 validator=validate,
1028 1029 releasefn=releasefn)
1029 1030
1030 1031 tr.hookargs['txnid'] = txnid
1031 1032 # note: writing the fncache only during finalize means that the file is
1032 1033 # outdated when running hooks. As fncache is used for streaming clone,
1033 1034 # this is not expected to break anything that happens during the hooks.
1034 1035 tr.addfinalize('flush-fncache', self.store.write)
1035 1036 def txnclosehook(tr2):
1036 1037 """To be run if transaction is successful, will schedule a hook run
1037 1038 """
1038 1039 # Don't reference tr2 in hook() so we don't hold a reference.
1039 1040 # This reduces memory consumption when there are multiple
1040 1041 # transactions per lock. This can likely go away if issue5045
1041 1042 # fixes the function accumulation.
1042 1043 hookargs = tr2.hookargs
1043 1044
1044 1045 def hook():
1045 1046 reporef().hook('txnclose', throw=False, txnname=desc,
1046 1047 **pycompat.strkwargs(hookargs))
1047 1048 reporef()._afterlock(hook)
1048 1049 tr.addfinalize('txnclose-hook', txnclosehook)
1049 1050 def txnaborthook(tr2):
1050 1051 """To be run if transaction is aborted
1051 1052 """
1052 1053 reporef().hook('txnabort', throw=False, txnname=desc,
1053 1054 **tr2.hookargs)
1054 1055 tr.addabort('txnabort-hook', txnaborthook)
1055 1056 # avoid eager cache invalidation. in-memory data should be identical
1056 1057 # to stored data if transaction has no error.
1057 1058 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1058 1059 self._transref = weakref.ref(tr)
1059 1060 return tr
1060 1061
1061 1062 def _journalfiles(self):
1062 1063 return ((self.svfs, 'journal'),
1063 1064 (self.vfs, 'journal.dirstate'),
1064 1065 (self.vfs, 'journal.branch'),
1065 1066 (self.vfs, 'journal.desc'),
1066 1067 (self.vfs, 'journal.bookmarks'),
1067 1068 (self.svfs, 'journal.phaseroots'))
1068 1069
1069 1070 def undofiles(self):
1070 1071 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1071 1072
1072 1073 def _writejournal(self, desc):
1073 1074 self.dirstate.savebackup(None, prefix='journal.')
1074 1075 self.vfs.write("journal.branch",
1075 1076 encoding.fromlocal(self.dirstate.branch()))
1076 1077 self.vfs.write("journal.desc",
1077 1078 "%d\n%s\n" % (len(self), desc))
1078 1079 self.vfs.write("journal.bookmarks",
1079 1080 self.vfs.tryread("bookmarks"))
1080 1081 self.svfs.write("journal.phaseroots",
1081 1082 self.svfs.tryread("phaseroots"))
1082 1083
1083 1084 def recover(self):
1084 1085 with self.lock():
1085 1086 if self.svfs.exists("journal"):
1086 1087 self.ui.status(_("rolling back interrupted transaction\n"))
1087 1088 vfsmap = {'': self.svfs,
1088 1089 'plain': self.vfs,}
1089 1090 transaction.rollback(self.svfs, vfsmap, "journal",
1090 1091 self.ui.warn)
1091 1092 self.invalidate()
1092 1093 return True
1093 1094 else:
1094 1095 self.ui.warn(_("no interrupted transaction available\n"))
1095 1096 return False
1096 1097
1097 1098 def rollback(self, dryrun=False, force=False):
1098 1099 wlock = lock = dsguard = None
1099 1100 try:
1100 1101 wlock = self.wlock()
1101 1102 lock = self.lock()
1102 1103 if self.svfs.exists("undo"):
1103 1104 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1104 1105
1105 1106 return self._rollback(dryrun, force, dsguard)
1106 1107 else:
1107 1108 self.ui.warn(_("no rollback information available\n"))
1108 1109 return 1
1109 1110 finally:
1110 1111 release(dsguard, lock, wlock)
1111 1112
1112 1113 @unfilteredmethod # Until we get smarter cache management
1113 1114 def _rollback(self, dryrun, force, dsguard):
1114 1115 ui = self.ui
1115 1116 try:
1116 1117 args = self.vfs.read('undo.desc').splitlines()
1117 1118 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1118 1119 if len(args) >= 3:
1119 1120 detail = args[2]
1120 1121 oldtip = oldlen - 1
1121 1122
1122 1123 if detail and ui.verbose:
1123 1124 msg = (_('repository tip rolled back to revision %s'
1124 1125 ' (undo %s: %s)\n')
1125 1126 % (oldtip, desc, detail))
1126 1127 else:
1127 1128 msg = (_('repository tip rolled back to revision %s'
1128 1129 ' (undo %s)\n')
1129 1130 % (oldtip, desc))
1130 1131 except IOError:
1131 1132 msg = _('rolling back unknown transaction\n')
1132 1133 desc = None
1133 1134
1134 1135 if not force and self['.'] != self['tip'] and desc == 'commit':
1135 1136 raise error.Abort(
1136 1137 _('rollback of last commit while not checked out '
1137 1138 'may lose data'), hint=_('use -f to force'))
1138 1139
1139 1140 ui.status(msg)
1140 1141 if dryrun:
1141 1142 return 0
1142 1143
1143 1144 parents = self.dirstate.parents()
1144 1145 self.destroying()
1145 1146 vfsmap = {'plain': self.vfs, '': self.svfs}
1146 1147 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1147 1148 if self.vfs.exists('undo.bookmarks'):
1148 1149 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1149 1150 if self.svfs.exists('undo.phaseroots'):
1150 1151 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1151 1152 self.invalidate()
1152 1153
1153 1154 parentgone = (parents[0] not in self.changelog.nodemap or
1154 1155 parents[1] not in self.changelog.nodemap)
1155 1156 if parentgone:
1156 1157 # prevent dirstateguard from overwriting already restored one
1157 1158 dsguard.close()
1158 1159
1159 1160 self.dirstate.restorebackup(None, prefix='undo.')
1160 1161 try:
1161 1162 branch = self.vfs.read('undo.branch')
1162 1163 self.dirstate.setbranch(encoding.tolocal(branch))
1163 1164 except IOError:
1164 1165 ui.warn(_('named branch could not be reset: '
1165 1166 'current branch is still \'%s\'\n')
1166 1167 % self.dirstate.branch())
1167 1168
1168 1169 parents = tuple([p.rev() for p in self[None].parents()])
1169 1170 if len(parents) > 1:
1170 1171 ui.status(_('working directory now based on '
1171 1172 'revisions %d and %d\n') % parents)
1172 1173 else:
1173 1174 ui.status(_('working directory now based on '
1174 1175 'revision %d\n') % parents)
1175 1176 mergemod.mergestate.clean(self, self['.'].node())
1176 1177
1177 1178 # TODO: if we know which new heads may result from this rollback, pass
1178 1179 # them to destroy(), which will prevent the branchhead cache from being
1179 1180 # invalidated.
1180 1181 self.destroyed()
1181 1182 return 0
1182 1183
1183 1184 def invalidatecaches(self):
1184 1185
1185 1186 if '_tagscache' in vars(self):
1186 1187 # can't use delattr on proxy
1187 1188 del self.__dict__['_tagscache']
1188 1189
1189 1190 self.unfiltered()._branchcaches.clear()
1190 1191 self.invalidatevolatilesets()
1191 1192
1192 1193 def invalidatevolatilesets(self):
1193 1194 self.filteredrevcache.clear()
1194 1195 obsolete.clearobscaches(self)
1195 1196
1196 1197 def invalidatedirstate(self):
1197 1198 '''Invalidates the dirstate, causing the next call to dirstate
1198 1199 to check if it was modified since the last time it was read,
1199 1200 rereading it if it has.
1200 1201
1201 1202 This differs from dirstate.invalidate() in that it doesn't always
1202 1203 reread the dirstate. Use dirstate.invalidate() if you want to
1203 1204 explicitly read the dirstate again (i.e. restoring it to a previous
1204 1205 known good state).'''
1205 1206 if hasunfilteredcache(self, 'dirstate'):
1206 1207 for k in self.dirstate._filecache:
1207 1208 try:
1208 1209 delattr(self.dirstate, k)
1209 1210 except AttributeError:
1210 1211 pass
1211 1212 delattr(self.unfiltered(), 'dirstate')
1212 1213
1213 1214 def invalidate(self, clearfilecache=False):
1214 1215 '''Invalidates both store and non-store parts other than dirstate
1215 1216
1216 1217 If a transaction is running, invalidation of store is omitted,
1217 1218 because discarding in-memory changes might cause inconsistency
1218 1219 (e.g. incomplete fncache causes unintentional failure, but
1219 1220 redundant one doesn't).
1220 1221 '''
1221 1222 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1222 1223 for k in list(self._filecache.keys()):
1223 1224 # dirstate is invalidated separately in invalidatedirstate()
1224 1225 if k == 'dirstate':
1225 1226 continue
1226 1227
1227 1228 if clearfilecache:
1228 1229 del self._filecache[k]
1229 1230 try:
1230 1231 delattr(unfiltered, k)
1231 1232 except AttributeError:
1232 1233 pass
1233 1234 self.invalidatecaches()
1234 1235 if not self.currenttransaction():
1235 1236 # TODO: Changing contents of store outside transaction
1236 1237 # causes inconsistency. We should make in-memory store
1237 1238 # changes detectable, and abort if changed.
1238 1239 self.store.invalidatecaches()
1239 1240
1240 1241 def invalidateall(self):
1241 1242 '''Fully invalidates both store and non-store parts, causing the
1242 1243 subsequent operation to reread any outside changes.'''
1243 1244 # extension should hook this to invalidate its caches
1244 1245 self.invalidate()
1245 1246 self.invalidatedirstate()
1246 1247
1247 1248 @unfilteredmethod
1248 1249 def _refreshfilecachestats(self, tr):
1249 1250 """Reload stats of cached files so that they are flagged as valid"""
1250 1251 for k, ce in self._filecache.items():
1251 1252 if k == 'dirstate' or k not in self.__dict__:
1252 1253 continue
1253 1254 ce.refresh()
1254 1255
1255 1256 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1256 1257 inheritchecker=None, parentenvvar=None):
1257 1258 parentlock = None
1258 1259 # the contents of parentenvvar are used by the underlying lock to
1259 1260 # determine whether it can be inherited
1260 1261 if parentenvvar is not None:
1261 1262 parentlock = encoding.environ.get(parentenvvar)
1262 1263 try:
1263 1264 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1264 1265 acquirefn=acquirefn, desc=desc,
1265 1266 inheritchecker=inheritchecker,
1266 1267 parentlock=parentlock)
1267 1268 except error.LockHeld as inst:
1268 1269 if not wait:
1269 1270 raise
1270 1271 # show more details for new-style locks
1271 1272 if ':' in inst.locker:
1272 1273 host, pid = inst.locker.split(":", 1)
1273 1274 self.ui.warn(
1274 1275 _("waiting for lock on %s held by process %r "
1275 1276 "on host %r\n") % (desc, pid, host))
1276 1277 else:
1277 1278 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1278 1279 (desc, inst.locker))
1279 1280 # default to 600 seconds timeout
1280 1281 l = lockmod.lock(vfs, lockname,
1281 1282 int(self.ui.config("ui", "timeout", "600")),
1282 1283 releasefn=releasefn, acquirefn=acquirefn,
1283 1284 desc=desc)
1284 1285 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1285 1286 return l
1286 1287
1287 1288 def _afterlock(self, callback):
1288 1289 """add a callback to be run when the repository is fully unlocked
1289 1290
1290 1291 The callback will be executed when the outermost lock is released
1291 1292 (with wlock being higher level than 'lock')."""
1292 1293 for ref in (self._wlockref, self._lockref):
1293 1294 l = ref and ref()
1294 1295 if l and l.held:
1295 1296 l.postrelease.append(callback)
1296 1297 break
1297 1298 else: # no lock has been found.
1298 1299 callback()
1299 1300
1300 1301 def lock(self, wait=True):
1301 1302 '''Lock the repository store (.hg/store) and return a weak reference
1302 1303 to the lock. Use this before modifying the store (e.g. committing or
1303 1304 stripping). If you are opening a transaction, get a lock as well.
1304 1305
1305 1306 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1306 1307 'wlock' first to avoid a dead-lock hazard.'''
1307 1308 l = self._currentlock(self._lockref)
1308 1309 if l is not None:
1309 1310 l.lock()
1310 1311 return l
1311 1312
1312 1313 l = self._lock(self.svfs, "lock", wait, None,
1313 1314 self.invalidate, _('repository %s') % self.origroot)
1314 1315 self._lockref = weakref.ref(l)
1315 1316 return l
1316 1317
1317 1318 def _wlockchecktransaction(self):
1318 1319 if self.currenttransaction() is not None:
1319 1320 raise error.LockInheritanceContractViolation(
1320 1321 'wlock cannot be inherited in the middle of a transaction')
1321 1322
1322 1323 def wlock(self, wait=True):
1323 1324 '''Lock the non-store parts of the repository (everything under
1324 1325 .hg except .hg/store) and return a weak reference to the lock.
1325 1326
1326 1327 Use this before modifying files in .hg.
1327 1328
1328 1329 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1329 1330 'wlock' first to avoid a dead-lock hazard.'''
1330 1331 l = self._wlockref and self._wlockref()
1331 1332 if l is not None and l.held:
1332 1333 l.lock()
1333 1334 return l
1334 1335
1335 1336 # We do not need to check for non-waiting lock acquisition. Such
1336 1337 # acquisition would not cause a deadlock as it would just fail.
1337 1338 if wait and (self.ui.configbool('devel', 'all-warnings')
1338 1339 or self.ui.configbool('devel', 'check-locks')):
1339 1340 if self._currentlock(self._lockref) is not None:
1340 1341 self.ui.develwarn('"wlock" acquired after "lock"')
1341 1342
1342 1343 def unlock():
1343 1344 if self.dirstate.pendingparentchange():
1344 1345 self.dirstate.invalidate()
1345 1346 else:
1346 1347 self.dirstate.write(None)
1347 1348
1348 1349 self._filecache['dirstate'].refresh()
1349 1350
1350 1351 l = self._lock(self.vfs, "wlock", wait, unlock,
1351 1352 self.invalidatedirstate, _('working directory of %s') %
1352 1353 self.origroot,
1353 1354 inheritchecker=self._wlockchecktransaction,
1354 1355 parentenvvar='HG_WLOCK_LOCKER')
1355 1356 self._wlockref = weakref.ref(l)
1356 1357 return l
1357 1358
1358 1359 def _currentlock(self, lockref):
1359 1360 """Returns the lock if it's held, or None if it's not."""
1360 1361 if lockref is None:
1361 1362 return None
1362 1363 l = lockref()
1363 1364 if l is None or not l.held:
1364 1365 return None
1365 1366 return l
1366 1367
1367 1368 def currentwlock(self):
1368 1369 """Returns the wlock if it's held, or None if it's not."""
1369 1370 return self._currentlock(self._wlockref)
1370 1371
1371 1372 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1372 1373 """
1373 1374 commit an individual file as part of a larger transaction
1374 1375 """
1375 1376
1376 1377 fname = fctx.path()
1377 1378 fparent1 = manifest1.get(fname, nullid)
1378 1379 fparent2 = manifest2.get(fname, nullid)
1379 1380 if isinstance(fctx, context.filectx):
1380 1381 node = fctx.filenode()
1381 1382 if node in [fparent1, fparent2]:
1382 1383 self.ui.debug('reusing %s filelog entry\n' % fname)
1383 1384 if manifest1.flags(fname) != fctx.flags():
1384 1385 changelist.append(fname)
1385 1386 return node
1386 1387
1387 1388 flog = self.file(fname)
1388 1389 meta = {}
1389 1390 copy = fctx.renamed()
1390 1391 if copy and copy[0] != fname:
1391 1392 # Mark the new revision of this file as a copy of another
1392 1393 # file. This copy data will effectively act as a parent
1393 1394 # of this new revision. If this is a merge, the first
1394 1395 # parent will be the nullid (meaning "look up the copy data")
1395 1396 # and the second one will be the other parent. For example:
1396 1397 #
1397 1398 # 0 --- 1 --- 3 rev1 changes file foo
1398 1399 # \ / rev2 renames foo to bar and changes it
1399 1400 # \- 2 -/ rev3 should have bar with all changes and
1400 1401 # should record that bar descends from
1401 1402 # bar in rev2 and foo in rev1
1402 1403 #
1403 1404 # this allows this merge to succeed:
1404 1405 #
1405 1406 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1406 1407 # \ / merging rev3 and rev4 should use bar@rev2
1407 1408 # \- 2 --- 4 as the merge base
1408 1409 #
1409 1410
1410 1411 cfname = copy[0]
1411 1412 crev = manifest1.get(cfname)
1412 1413 newfparent = fparent2
1413 1414
1414 1415 if manifest2: # branch merge
1415 1416 if fparent2 == nullid or crev is None: # copied on remote side
1416 1417 if cfname in manifest2:
1417 1418 crev = manifest2[cfname]
1418 1419 newfparent = fparent1
1419 1420
1420 1421 # Here, we used to search backwards through history to try to find
1421 1422 # where the file copy came from if the source of a copy was not in
1422 1423 # the parent directory. However, this doesn't actually make sense to
1423 1424 # do (what does a copy from something not in your working copy even
1424 1425 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1425 1426 # the user that copy information was dropped, so if they didn't
1426 1427 # expect this outcome it can be fixed, but this is the correct
1427 1428 # behavior in this circumstance.
1428 1429
1429 1430 if crev:
1430 1431 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1431 1432 meta["copy"] = cfname
1432 1433 meta["copyrev"] = hex(crev)
1433 1434 fparent1, fparent2 = nullid, newfparent
1434 1435 else:
1435 1436 self.ui.warn(_("warning: can't find ancestor for '%s' "
1436 1437 "copied from '%s'!\n") % (fname, cfname))
1437 1438
1438 1439 elif fparent1 == nullid:
1439 1440 fparent1, fparent2 = fparent2, nullid
1440 1441 elif fparent2 != nullid:
1441 1442 # is one parent an ancestor of the other?
1442 1443 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1443 1444 if fparent1 in fparentancestors:
1444 1445 fparent1, fparent2 = fparent2, nullid
1445 1446 elif fparent2 in fparentancestors:
1446 1447 fparent2 = nullid
1447 1448
1448 1449 # is the file changed?
1449 1450 text = fctx.data()
1450 1451 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1451 1452 changelist.append(fname)
1452 1453 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1453 1454 # are just the flags changed during merge?
1454 1455 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1455 1456 changelist.append(fname)
1456 1457
1457 1458 return fparent1
1458 1459
1459 1460 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1460 1461 """check for commit arguments that aren't committable"""
1461 1462 if match.isexact() or match.prefix():
1462 1463 matched = set(status.modified + status.added + status.removed)
1463 1464
1464 1465 for f in match.files():
1465 1466 f = self.dirstate.normalize(f)
1466 1467 if f == '.' or f in matched or f in wctx.substate:
1467 1468 continue
1468 1469 if f in status.deleted:
1469 1470 fail(f, _('file not found!'))
1470 1471 if f in vdirs: # visited directory
1471 1472 d = f + '/'
1472 1473 for mf in matched:
1473 1474 if mf.startswith(d):
1474 1475 break
1475 1476 else:
1476 1477 fail(f, _("no match under directory!"))
1477 1478 elif f not in self.dirstate:
1478 1479 fail(f, _("file not tracked!"))
1479 1480
1480 1481 @unfilteredmethod
1481 1482 def commit(self, text="", user=None, date=None, match=None, force=False,
1482 1483 editor=False, extra=None):
1483 1484 """Add a new revision to current repository.
1484 1485
1485 1486 Revision information is gathered from the working directory;
1486 1487 match can be used to filter the committed files. If editor is
1487 1488 supplied, it is called to get a commit message.
1488 1489 """
1489 1490 if extra is None:
1490 1491 extra = {}
1491 1492
1492 1493 def fail(f, msg):
1493 1494 raise error.Abort('%s: %s' % (f, msg))
1494 1495
1495 1496 if not match:
1496 1497 match = matchmod.always(self.root, '')
1497 1498
1498 1499 if not force:
1499 1500 vdirs = []
1500 1501 match.explicitdir = vdirs.append
1501 1502 match.bad = fail
1502 1503
1503 1504 wlock = lock = tr = None
1504 1505 try:
1505 1506 wlock = self.wlock()
1506 1507 lock = self.lock() # for recent changelog (see issue4368)
1507 1508
1508 1509 wctx = self[None]
1509 1510 merge = len(wctx.parents()) > 1
1510 1511
1511 1512 if not force and merge and match.ispartial():
1512 1513 raise error.Abort(_('cannot partially commit a merge '
1513 1514 '(do not specify files or patterns)'))
1514 1515
1515 1516 status = self.status(match=match, clean=force)
1516 1517 if force:
1517 1518 status.modified.extend(status.clean) # mq may commit clean files
1518 1519
1519 1520 # check subrepos
1520 1521 subs = []
1521 1522 commitsubs = set()
1522 1523 newstate = wctx.substate.copy()
1523 1524 # only manage subrepos and .hgsubstate if .hgsub is present
1524 1525 if '.hgsub' in wctx:
1525 1526 # we'll decide whether to track this ourselves, thanks
1526 1527 for c in status.modified, status.added, status.removed:
1527 1528 if '.hgsubstate' in c:
1528 1529 c.remove('.hgsubstate')
1529 1530
1530 1531 # compare current state to last committed state
1531 1532 # build new substate based on last committed state
1532 1533 oldstate = wctx.p1().substate
1533 1534 for s in sorted(newstate.keys()):
1534 1535 if not match(s):
1535 1536 # ignore working copy, use old state if present
1536 1537 if s in oldstate:
1537 1538 newstate[s] = oldstate[s]
1538 1539 continue
1539 1540 if not force:
1540 1541 raise error.Abort(
1541 1542 _("commit with new subrepo %s excluded") % s)
1542 1543 dirtyreason = wctx.sub(s).dirtyreason(True)
1543 1544 if dirtyreason:
1544 1545 if not self.ui.configbool('ui', 'commitsubrepos'):
1545 1546 raise error.Abort(dirtyreason,
1546 1547 hint=_("use --subrepos for recursive commit"))
1547 1548 subs.append(s)
1548 1549 commitsubs.add(s)
1549 1550 else:
1550 1551 bs = wctx.sub(s).basestate()
1551 1552 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1552 1553 if oldstate.get(s, (None, None, None))[1] != bs:
1553 1554 subs.append(s)
1554 1555
1555 1556 # check for removed subrepos
1556 1557 for p in wctx.parents():
1557 1558 r = [s for s in p.substate if s not in newstate]
1558 1559 subs += [s for s in r if match(s)]
1559 1560 if subs:
1560 1561 if (not match('.hgsub') and
1561 1562 '.hgsub' in (wctx.modified() + wctx.added())):
1562 1563 raise error.Abort(
1563 1564 _("can't commit subrepos without .hgsub"))
1564 1565 status.modified.insert(0, '.hgsubstate')
1565 1566
1566 1567 elif '.hgsub' in status.removed:
1567 1568 # clean up .hgsubstate when .hgsub is removed
1568 1569 if ('.hgsubstate' in wctx and
1569 1570 '.hgsubstate' not in (status.modified + status.added +
1570 1571 status.removed)):
1571 1572 status.removed.insert(0, '.hgsubstate')
1572 1573
1573 1574 # make sure all explicit patterns are matched
1574 1575 if not force:
1575 1576 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1576 1577
1577 1578 cctx = context.workingcommitctx(self, status,
1578 1579 text, user, date, extra)
1579 1580
1580 1581 # internal config: ui.allowemptycommit
1581 1582 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1582 1583 or extra.get('close') or merge or cctx.files()
1583 1584 or self.ui.configbool('ui', 'allowemptycommit'))
1584 1585 if not allowemptycommit:
1585 1586 return None
1586 1587
1587 1588 if merge and cctx.deleted():
1588 1589 raise error.Abort(_("cannot commit merge with missing files"))
1589 1590
1590 1591 ms = mergemod.mergestate.read(self)
1591 1592 mergeutil.checkunresolved(ms)
1592 1593
1593 1594 if editor:
1594 1595 cctx._text = editor(self, cctx, subs)
1595 1596 edited = (text != cctx._text)
1596 1597
1597 1598 # Save commit message in case this transaction gets rolled back
1598 1599 # (e.g. by a pretxncommit hook). Leave the content alone on
1599 1600 # the assumption that the user will use the same editor again.
1600 1601 msgfn = self.savecommitmessage(cctx._text)
1601 1602
1602 1603 # commit subs and write new state
1603 1604 if subs:
1604 1605 for s in sorted(commitsubs):
1605 1606 sub = wctx.sub(s)
1606 1607 self.ui.status(_('committing subrepository %s\n') %
1607 1608 subrepo.subrelpath(sub))
1608 1609 sr = sub.commit(cctx._text, user, date)
1609 1610 newstate[s] = (newstate[s][0], sr)
1610 1611 subrepo.writestate(self, newstate)
1611 1612
1612 1613 p1, p2 = self.dirstate.parents()
1613 1614 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1614 1615 try:
1615 1616 self.hook("precommit", throw=True, parent1=hookp1,
1616 1617 parent2=hookp2)
1617 1618 tr = self.transaction('commit')
1618 1619 ret = self.commitctx(cctx, True)
1619 1620 except: # re-raises
1620 1621 if edited:
1621 1622 self.ui.write(
1622 1623 _('note: commit message saved in %s\n') % msgfn)
1623 1624 raise
1624 1625 # update bookmarks, dirstate and mergestate
1625 1626 bookmarks.update(self, [p1, p2], ret)
1626 1627 cctx.markcommitted(ret)
1627 1628 ms.reset()
1628 1629 tr.close()
1629 1630
1630 1631 finally:
1631 1632 lockmod.release(tr, lock, wlock)
1632 1633
1633 1634 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1634 1635 # hack for commands that use a temporary commit (e.g. histedit):
1635 1636 # the temporary commit may have been stripped before the hook runs
1636 1637 if self.changelog.hasnode(ret):
1637 1638 self.hook("commit", node=node, parent1=parent1,
1638 1639 parent2=parent2)
1639 1640 self._afterlock(commithook)
1640 1641 return ret
1641 1642
1642 1643 @unfilteredmethod
1643 1644 def commitctx(self, ctx, error=False):
1644 1645 """Add a new revision to current repository.
1645 1646 Revision information is passed via the context argument.
1646 1647 """
1647 1648
1648 1649 tr = None
1649 1650 p1, p2 = ctx.p1(), ctx.p2()
1650 1651 user = ctx.user()
1651 1652
1652 1653 lock = self.lock()
1653 1654 try:
1654 1655 tr = self.transaction("commit")
1655 1656 trp = weakref.proxy(tr)
1656 1657
1657 1658 if ctx.manifestnode():
1658 1659 # reuse an existing manifest revision
1659 1660 mn = ctx.manifestnode()
1660 1661 files = ctx.files()
1661 1662 elif ctx.files():
1662 1663 m1ctx = p1.manifestctx()
1663 1664 m2ctx = p2.manifestctx()
1664 1665 mctx = m1ctx.copy()
1665 1666
1666 1667 m = mctx.read()
1667 1668 m1 = m1ctx.read()
1668 1669 m2 = m2ctx.read()
1669 1670
1670 1671 # check in files
1671 1672 added = []
1672 1673 changed = []
1673 1674 removed = list(ctx.removed())
1674 1675 linkrev = len(self)
1675 1676 self.ui.note(_("committing files:\n"))
1676 1677 for f in sorted(ctx.modified() + ctx.added()):
1677 1678 self.ui.note(f + "\n")
1678 1679 try:
1679 1680 fctx = ctx[f]
1680 1681 if fctx is None:
1681 1682 removed.append(f)
1682 1683 else:
1683 1684 added.append(f)
1684 1685 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1685 1686 trp, changed)
1686 1687 m.setflag(f, fctx.flags())
1687 1688 except OSError as inst:
1688 1689 self.ui.warn(_("trouble committing %s!\n") % f)
1689 1690 raise
1690 1691 except IOError as inst:
1691 1692 errcode = getattr(inst, 'errno', errno.ENOENT)
1692 1693 if error or errcode and errcode != errno.ENOENT:
1693 1694 self.ui.warn(_("trouble committing %s!\n") % f)
1694 1695 raise
1695 1696
1696 1697 # update manifest
1697 1698 self.ui.note(_("committing manifest\n"))
1698 1699 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1699 1700 drop = [f for f in removed if f in m]
1700 1701 for f in drop:
1701 1702 del m[f]
1702 1703 mn = mctx.write(trp, linkrev,
1703 1704 p1.manifestnode(), p2.manifestnode(),
1704 1705 added, drop)
1705 1706 files = changed + removed
1706 1707 else:
1707 1708 mn = p1.manifestnode()
1708 1709 files = []
1709 1710
1710 1711 # update changelog
1711 1712 self.ui.note(_("committing changelog\n"))
1712 1713 self.changelog.delayupdate(tr)
1713 1714 n = self.changelog.add(mn, files, ctx.description(),
1714 1715 trp, p1.node(), p2.node(),
1715 1716 user, ctx.date(), ctx.extra().copy())
1716 1717 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1717 1718 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1718 1719 parent2=xp2)
1719 1720 # set the new commit in its proper phase
1720 1721 targetphase = subrepo.newcommitphase(self.ui, ctx)
1721 1722 if targetphase:
1722 1723 # retracting the boundary does not alter parent changesets.
1723 1724 # if a parent has a higher phase, the resulting phase will
1724 1725 # be compliant anyway
1725 1726 #
1726 1727 # if minimal phase was 0 we don't need to retract anything
1727 1728 phases.retractboundary(self, tr, targetphase, [n])
1728 1729 tr.close()
1729 1730 branchmap.updatecache(self.filtered('served'))
1730 1731 return n
1731 1732 finally:
1732 1733 if tr:
1733 1734 tr.release()
1734 1735 lock.release()
1735 1736
1736 1737 @unfilteredmethod
1737 1738 def destroying(self):
1738 1739 '''Inform the repository that nodes are about to be destroyed.
1739 1740 Intended for use by strip and rollback, so there's a common
1740 1741 place for anything that has to be done before destroying history.
1741 1742
1742 1743 This is mostly useful for saving state that is in memory and waiting
1743 1744 to be flushed when the current lock is released. Because a call to
1744 1745 destroyed is imminent, the repo will be invalidated, causing those
1745 1746 changes either to stay in memory (waiting for the next unlock) or to
1746 1747 vanish completely.
1747 1748 '''
1748 1749 # When using the same lock to commit and strip, the phasecache is left
1749 1750 # dirty after committing. Then when we strip, the repo is invalidated,
1750 1751 # causing those changes to disappear.
1751 1752 if '_phasecache' in vars(self):
1752 1753 self._phasecache.write()
1753 1754
1754 1755 @unfilteredmethod
1755 1756 def destroyed(self):
1756 1757 '''Inform the repository that nodes have been destroyed.
1757 1758 Intended for use by strip and rollback, so there's a common
1758 1759 place for anything that has to be done after destroying history.
1759 1760 '''
1760 1761 # When one tries to:
1761 1762 # 1) destroy nodes thus calling this method (e.g. strip)
1762 1763 # 2) use phasecache somewhere (e.g. commit)
1763 1764 #
1764 1765 # then 2) will fail because the phasecache contains nodes that were
1765 1766 # removed. We can either remove phasecache from the filecache,
1766 1767 # causing it to reload next time it is accessed, or simply filter
1767 1768 # the removed nodes now and write the updated cache.
1768 1769 self._phasecache.filterunknown(self)
1769 1770 self._phasecache.write()
1770 1771
1771 1772 # update the 'served' branch cache to help read-only server processes
1772 1773 # Thanks to branchcache collaboration this is done from the nearest
1773 1774 # filtered subset and it is expected to be fast.
1774 1775 branchmap.updatecache(self.filtered('served'))
1775 1776
1776 1777 # Ensure the persistent tag cache is updated. Doing it now
1777 1778 # means that the tag cache only has to worry about destroyed
1778 1779 # heads immediately after a strip/rollback. That in turn
1779 1780 # guarantees that "cachetip == currenttip" (comparing both rev
1780 1781 # and node) always means no nodes have been added or destroyed.
1781 1782
1782 1783 # XXX this is suboptimal when qrefresh'ing: we strip the current
1783 1784 # head, refresh the tag cache, then immediately add a new head.
1784 1785 # But I think doing it this way is necessary for the "instant
1785 1786 # tag cache retrieval" case to work.
1786 1787 self.invalidate()
1787 1788
1788 1789 def walk(self, match, node=None):
1789 1790 '''
1790 1791 walk recursively through the directory tree or a given
1791 1792 changeset, finding all files matched by the match
1792 1793 function
1793 1794 '''
1794 1795 return self[node].walk(match)
1795 1796
1796 1797 def status(self, node1='.', node2=None, match=None,
1797 1798 ignored=False, clean=False, unknown=False,
1798 1799 listsubrepos=False):
1799 1800 '''a convenience method that calls node1.status(node2)'''
1800 1801 return self[node1].status(node2, match, ignored, clean, unknown,
1801 1802 listsubrepos)
1802 1803
1803 1804 def heads(self, start=None):
1804 1805 if start is None:
1805 1806 cl = self.changelog
1806 1807 headrevs = reversed(cl.headrevs())
1807 1808 return [cl.node(rev) for rev in headrevs]
1808 1809
1809 1810 heads = self.changelog.heads(start)
1810 1811 # sort the output in rev descending order
1811 1812 return sorted(heads, key=self.changelog.rev, reverse=True)
1812 1813
1813 1814 def branchheads(self, branch=None, start=None, closed=False):
1814 1815 '''return a (possibly filtered) list of heads for the given branch
1815 1816
1816 1817 Heads are returned in topological order, from newest to oldest.
1817 1818 If branch is None, use the dirstate branch.
1818 1819 If start is not None, return only heads reachable from start.
1819 1820 If closed is True, return heads that are marked as closed as well.
1820 1821 '''
1821 1822 if branch is None:
1822 1823 branch = self[None].branch()
1823 1824 branches = self.branchmap()
1824 1825 if branch not in branches:
1825 1826 return []
1826 1827 # the cache returns heads ordered lowest to highest
1827 1828 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1828 1829 if start is not None:
1829 1830 # filter out the heads that cannot be reached from startrev
1830 1831 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1831 1832 bheads = [h for h in bheads if h in fbheads]
1832 1833 return bheads
1833 1834
1834 1835 def branches(self, nodes):
1835 1836 if not nodes:
1836 1837 nodes = [self.changelog.tip()]
1837 1838 b = []
1838 1839 for n in nodes:
1839 1840 t = n
1840 1841 while True:
1841 1842 p = self.changelog.parents(n)
1842 1843 if p[1] != nullid or p[0] == nullid:
1843 1844 b.append((t, n, p[0], p[1]))
1844 1845 break
1845 1846 n = p[0]
1846 1847 return b
1847 1848
1848 1849 def between(self, pairs):
1849 1850 r = []
1850 1851
1851 1852 for top, bottom in pairs:
1852 1853 n, l, i = top, [], 0
1853 1854 f = 1
1854 1855
1855 1856 while n != bottom and n != nullid:
1856 1857 p = self.changelog.parents(n)[0]
1857 1858 if i == f:
1858 1859 l.append(n)
1859 1860 f = f * 2
1860 1861 n = p
1861 1862 i += 1
1862 1863
1863 1864 r.append(l)
1864 1865
1865 1866 return r
1866 1867
1867 1868 def checkpush(self, pushop):
1868 1869 """Extensions can override this function if additional checks have
1869 1870 to be performed before pushing, or call it if they override push
1870 1871 command.
1871 1872 """
1872 1873 pass
1873 1874
1874 1875 @unfilteredpropertycache
1875 1876 def prepushoutgoinghooks(self):
1876 1877 """Return util.hooks consists of a pushop with repo, remote, outgoing
1877 1878 methods, which are called before pushing changesets.
1878 1879 """
1879 1880 return util.hooks()
1880 1881
1881 1882 def pushkey(self, namespace, key, old, new):
1882 1883 try:
1883 1884 tr = self.currenttransaction()
1884 1885 hookargs = {}
1885 1886 if tr is not None:
1886 1887 hookargs.update(tr.hookargs)
1887 1888 hookargs['namespace'] = namespace
1888 1889 hookargs['key'] = key
1889 1890 hookargs['old'] = old
1890 1891 hookargs['new'] = new
1891 1892 self.hook('prepushkey', throw=True, **hookargs)
1892 1893 except error.HookAbort as exc:
1893 1894 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1894 1895 if exc.hint:
1895 1896 self.ui.write_err(_("(%s)\n") % exc.hint)
1896 1897 return False
1897 1898 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1898 1899 ret = pushkey.push(self, namespace, key, old, new)
1899 1900 def runhook():
1900 1901 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1901 1902 ret=ret)
1902 1903 self._afterlock(runhook)
1903 1904 return ret
1904 1905
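For context only (not part of this changeset): the prepushkey/pushkey hooks fired above let server operators veto or audit pushkey operations. A minimal sketch of an in-process hook, assuming a hypothetical file myhooks.py and the standard python: hook syntax; the names and path here are illustrative:

    # myhooks.py -- hypothetical example, not shipped with Mercurial
    def rejectbookmarks(ui, repo, hooktype, namespace=None, **kwargs):
        # a truthy return value from a pre* hook aborts the operation
        if namespace == 'bookmarks':
            ui.warn('bookmark pushes are disabled on this server\n')
            return True
        return False

    # enabled from a repository or system hgrc:
    #   [hooks]
    #   prepushkey.nobookmarks = python:/path/to/myhooks.py:rejectbookmarks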
1905 1906 def listkeys(self, namespace):
1906 1907 self.hook('prelistkeys', throw=True, namespace=namespace)
1907 1908 self.ui.debug('listing keys for "%s"\n' % namespace)
1908 1909 values = pushkey.list(self, namespace)
1909 1910 self.hook('listkeys', namespace=namespace, values=values)
1910 1911 return values
1911 1912
1912 1913 def debugwireargs(self, one, two, three=None, four=None, five=None):
1913 1914 '''used to test argument passing over the wire'''
1914 1915 return "%s %s %s %s %s" % (one, two, three, four, five)
1915 1916
1916 1917 def savecommitmessage(self, text):
1917 1918 fp = self.vfs('last-message.txt', 'wb')
1918 1919 try:
1919 1920 fp.write(text)
1920 1921 finally:
1921 1922 fp.close()
1922 1923 return self.pathto(fp.name[len(self.root) + 1:])
1923 1924
1924 1925 # used to avoid circular references so destructors work
1925 1926 def aftertrans(files):
1926 1927 renamefiles = [tuple(t) for t in files]
1927 1928 def a():
1928 1929 for vfs, src, dest in renamefiles:
1929 1930 # if src and dest refer to the same file, vfs.rename is a no-op,
1930 1931 # leaving both src and dest on disk. delete dest to make sure
1931 1932 # the rename couldn't be such a no-op.
1932 1933 vfs.tryunlink(dest)
1933 1934 try:
1934 1935 vfs.rename(src, dest)
1935 1936 except OSError: # journal file does not yet exist
1936 1937 pass
1937 1938 return a
1938 1939
1939 1940 def undoname(fn):
1940 1941 base, name = os.path.split(fn)
1941 1942 assert name.startswith('journal')
1942 1943 return os.path.join(base, name.replace('journal', 'undo', 1))
1943 1944
1944 1945 def instance(ui, path, create):
1945 1946 return localrepository(ui, util.urllocalpath(path), create)
1946 1947
1947 1948 def islocal(path):
1948 1949 return True
1949 1950
1950 1951 def newreporequirements(repo):
1951 1952 """Determine the set of requirements for a new local repository.
1952 1953
1953 1954 Extensions can wrap this function to specify custom requirements for
1954 1955 new repositories.
1955 1956 """
1956 1957 ui = repo.ui
1957 1958 requirements = set(['revlogv1'])
1958 1959 if ui.configbool('format', 'usestore', True):
1959 1960 requirements.add('store')
1960 1961 if ui.configbool('format', 'usefncache', True):
1961 1962 requirements.add('fncache')
1962 1963 if ui.configbool('format', 'dotencode', True):
1963 1964 requirements.add('dotencode')
1964 1965
1965 1966 compengine = ui.config('experimental', 'format.compression', 'zlib')
1966 1967 if compengine not in util.compengines:
1967 1968 raise error.Abort(_('compression engine %s defined by '
1968 1969 'experimental.format.compression not available') %
1969 1970 compengine,
1970 1971 hint=_('run "hg debuginstall" to list available '
1971 1972 'compression engines'))
1972 1973
1973 1974 # zlib is the historical default and doesn't need an explicit requirement.
1974 1975 if compengine != 'zlib':
1975 1976 requirements.add('exp-compression-%s' % compengine)
1976 1977
1977 1978 if scmutil.gdinitconfig(ui):
1978 1979 requirements.add('generaldelta')
1979 1980 if ui.configbool('experimental', 'treemanifest', False):
1980 1981 requirements.add('treemanifest')
1981 1982 if ui.configbool('experimental', 'manifestv2', False):
1982 1983 requirements.add('manifestv2')
1983 1984
1984 1985 return requirements
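As the docstring of newreporequirements notes, extensions can wrap it to add custom requirements for newly created repositories. A hedged sketch of such a wrapper, assuming a hypothetical config section 'myext' and a hypothetical requirement string 'exp-myfeature':

    from mercurial import extensions, localrepo

    def _newreporequirements(orig, repo):
        # start from the default requirement set computed by the original
        requirements = orig(repo)
        if repo.ui.configbool('myext', 'enabled', False):
            requirements.add('exp-myfeature')
        return requirements

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements',
                                _newreporequirements)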
@@ -1,680 +1,676 b''
1 1 # tags.py - read tag info from local repository
2 2 #
3 3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2009 Greg Ward <greg@gerg.ca>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 # Currently this module only deals with reading and caching tags.
10 10 # Eventually, it could take care of updating (adding/removing/moving)
11 11 # tags too.
12 12
13 13 from __future__ import absolute_import
14 14
15 15 import errno
16 16
17 17 from .node import (
18 18 bin,
19 19 hex,
20 20 nullid,
21 21 short,
22 22 )
23 23 from .i18n import _
24 24 from . import (
25 25 encoding,
26 26 error,
27 27 match as matchmod,
28 28 scmutil,
29 29 util,
30 30 )
31 31
32 32 # Tags computation can be expensive and caches exist to make it fast in
33 33 # the common case.
34 34 #
35 35 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
36 36 # each revision in the repository. The file is effectively an array of
37 37 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
38 38 # details.
39 39 #
40 40 # The .hgtags filenode cache grows in proportion to the length of the
41 41 # changelog. The file is truncated when the changelog is stripped.
42 42 #
43 43 # The purpose of the filenode cache is to avoid the most expensive part
44 44 # of finding global tags, which is looking up the .hgtags filenode in the
45 45 # manifest for each head. This can take dozens of milliseconds, or over
46 46 # 100ms, for repositories with very large manifests. Multiplied by dozens
47 47 # or even hundreds of heads, this becomes a significant performance concern.
48 48 #
49 49 # There also exists a separate cache file for each repository filter.
50 50 # These "tags-*" files store information about the history of tags.
51 51 #
52 52 # The tags cache files consist of a cache validation line followed by
53 53 # a history of tags.
54 54 #
55 55 # The cache validation line has the format:
56 56 #
57 57 # <tiprev> <tipnode> [<filteredhash>]
58 58 #
59 59 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
60 60 # node for that changeset. These redundantly identify the repository
61 61 # tip from the time the cache was written. In addition, <filteredhash>,
62 62 # if present, is a 40 character hex hash of the contents of the filtered
63 63 # revisions for this filter. If the set of filtered revs changes, the
64 64 # hash will change and invalidate the cache.
65 65 #
66 66 # The history part of the tags cache consists of lines of the form:
67 67 #
68 68 # <node> <tag>
69 69 #
70 70 # (This format is identical to that of .hgtags files.)
71 71 #
72 72 # <tag> is the tag name and <node> is the 40 character hex changeset
73 73 # the tag is associated with.
74 74 #
75 75 # Tags are written sorted by tag name.
76 76 #
77 77 # Tags associated with multiple changesets have an entry for each changeset.
78 78 # The most recent changeset (in terms of revlog ordering for the head
79 79 # setting it) for each tag is last.
80 80
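# Purely schematic sketch of the resulting cache file layout (the
# angle-bracket tokens stand for real values and 'release-1.0' is an
# invented tag name; nothing below is actual data):
#
#   <tiprev> <tipnode-hex> <filteredhash-hex>
#   <older-node-hex> release-1.0
#   <current-node-hex> release-1.0
#
# History lines for a tag come first; the last line for a tag names the
# changeset it currently points to, as described above.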
81 def findglobaltags(ui, repo, alltags, tagtypes):
82 '''Find global tags in a repo.
81 def findglobaltags(ui, repo):
82 '''Find global tags in a repo: return (alltags, tagtypes)
83 83
84 84 "alltags" maps tag name to (node, hist) 2-tuples.
85 85
86 86 "tagtypes" maps tag name to tag type. Global tags always have the
87 87 "global" tag type.
88 88
89 The "alltags" and "tagtypes" dicts are updated in place. Empty dicts
90 should be passed in.
91
92 89 The tags cache is read and updated as a side-effect of calling.
93 90 '''
94 # This is so we can be lazy and assume alltags contains only global
95 # tags when we pass it to _writetagcache().
96 assert len(alltags) == len(tagtypes) == 0, \
97 "findglobaltags() should be called first"
91 alltags = {}
92 tagtypes = {}
98 93
99 94 (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
100 95 if cachetags is not None:
101 96 assert not shouldwrite
102 97 # XXX is this really 100% correct? are there oddball special
103 98 # cases where a global tag should outrank a local tag but won't,
104 99 # because cachetags does not contain rank info?
105 100 _updatetags(cachetags, 'global', alltags, tagtypes)
106 return
101 return alltags, tagtypes
107 102
108 103 seen = set() # set of fnode
109 104 fctx = None
110 105 for head in reversed(heads): # oldest to newest
111 106 assert head in repo.changelog.nodemap, \
112 107 "tag cache returned bogus head %s" % short(head)
113 108
114 109 fnode = tagfnode.get(head)
115 110 if fnode and fnode not in seen:
116 111 seen.add(fnode)
117 112 if not fctx:
118 113 fctx = repo.filectx('.hgtags', fileid=fnode)
119 114 else:
120 115 fctx = fctx.filectx(fnode)
121 116
122 117 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
123 118 _updatetags(filetags, 'global', alltags, tagtypes)
124 119
125 120 # and update the cache (if necessary)
126 121 if shouldwrite:
127 122 _writetagcache(ui, repo, valid, alltags)
123 return alltags, tagtypes
128 124
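For context: after this change, callers receive the two dictionaries as return values instead of supplying them. A minimal sketch of the new calling convention (readlocaltags, defined just below, still updates the dicts in place):

    alltags, tagtypes = findglobaltags(ui, repo)
    readlocaltags(ui, repo, alltags, tagtypes)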
129 125 def readlocaltags(ui, repo, alltags, tagtypes):
130 126 '''Read local tags in repo. Update alltags and tagtypes.'''
131 127 try:
132 128 data = repo.vfs.read("localtags")
133 129 except IOError as inst:
134 130 if inst.errno != errno.ENOENT:
135 131 raise
136 132 return
137 133
138 134 # localtags is in the local encoding; re-encode to UTF-8 on
139 135 # input for consistency with the rest of this module.
140 136 filetags = _readtags(
141 137 ui, repo, data.splitlines(), "localtags",
142 138 recode=encoding.fromlocal)
143 139
144 140 # remove tags pointing to invalid nodes
145 141 cl = repo.changelog
146 142 for t in filetags.keys():
147 143 try:
148 144 cl.rev(filetags[t][0])
149 145 except (LookupError, ValueError):
150 146 del filetags[t]
151 147
152 148 _updatetags(filetags, "local", alltags, tagtypes)
153 149
154 150 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
155 151 '''Read tag definitions from a file (or any source of lines).
156 152
157 153 This function returns two sortdicts with similar information:
158 154
159 155 - the first dict, bintaghist, contains the tag information as expected by
160 156 the _readtags function, i.e. a mapping from tag name to (node, hist):
161 157 - node is the node id from the last line read for that name,
162 158 - hist is the list of node ids previously associated with it (in file
163 159 order). All node ids are binary, not hex.
164 160
165 161 - the second dict, hextaglines, is a mapping from tag name to a list of
166 162 [hexnode, line number] pairs, ordered from the oldest to the newest node.
167 163
168 164 When calcnodelines is False the hextaglines dict is not calculated (an
169 165 empty dict is returned). This is done to improve this function's
170 166 performance in cases where the line numbers are not needed.
171 167 '''
172 168
173 169 bintaghist = util.sortdict()
174 170 hextaglines = util.sortdict()
175 171 count = 0
176 172
177 173 def dbg(msg):
178 174 ui.debug("%s, line %s: %s\n" % (fn, count, msg))
179 175
180 176 for nline, line in enumerate(lines):
181 177 count += 1
182 178 if not line:
183 179 continue
184 180 try:
185 181 (nodehex, name) = line.split(" ", 1)
186 182 except ValueError:
187 183 dbg("cannot parse entry")
188 184 continue
189 185 name = name.strip()
190 186 if recode:
191 187 name = recode(name)
192 188 try:
193 189 nodebin = bin(nodehex)
194 190 except TypeError:
195 191 dbg("node '%s' is not well formed" % nodehex)
196 192 continue
197 193
198 194 # update filetags
199 195 if calcnodelines:
200 196 # map tag name to a list of line numbers
201 197 if name not in hextaglines:
202 198 hextaglines[name] = []
203 199 hextaglines[name].append([nodehex, nline])
204 200 continue
205 201 # map tag name to (node, hist)
206 202 if name not in bintaghist:
207 203 bintaghist[name] = []
208 204 bintaghist[name].append(nodebin)
209 205 return bintaghist, hextaglines
210 206
211 207 def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
212 208 '''Read tag definitions from a file (or any source of lines).
213 209
214 210 Returns a mapping from tag name to (node, hist).
215 211
216 212 "node" is the node id from the last line read for that name. "hist"
217 213 is the list of node ids previously associated with it (in file order).
218 214 All node ids are binary, not hex.
219 215 '''
220 216 filetags, nodelines = _readtaghist(ui, repo, lines, fn, recode=recode,
221 217 calcnodelines=calcnodelines)
222 218 # util.sortdict().__setitem__ is much slower at replacing than inserting
223 219 # new entries. The difference can matter if there are thousands of tags.
224 220 # Create a new sortdict to avoid the performance penalty.
225 221 newtags = util.sortdict()
226 222 for tag, taghist in filetags.items():
227 223 newtags[tag] = (taghist[-1], taghist[:-1])
228 224 return newtags
229 225
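An illustrative view of the reshaping performed by _readtags above (the tag name and node placeholders are made up):

    # filetags, as produced by _readtaghist: tag -> nodes in file order
    #   {'release-1.0': [n1, n2, n3]}
    # newtags, as returned here: tag -> (current node, prior history)
    #   {'release-1.0': (n3, [n1, n2])}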
230 226 def _updatetags(filetags, tagtype, alltags, tagtypes):
231 227 '''Incorporate the tag info read from one file into the two
232 228 dictionaries, alltags and tagtypes, that contain all tag
233 229 info (global across all heads plus local).'''
234 230
235 231 for name, nodehist in filetags.iteritems():
236 232 if name not in alltags:
237 233 alltags[name] = nodehist
238 234 tagtypes[name] = tagtype
239 235 continue
240 236
241 237 # we prefer alltags[name] if:
242 238 # it supersedes us OR
243 239 # mutual supersedes and it has a higher rank
244 240 # otherwise we win because we're tip-most
245 241 anode, ahist = nodehist
246 242 bnode, bhist = alltags[name]
247 243 if (bnode != anode and anode in bhist and
248 244 (bnode not in ahist or len(bhist) > len(ahist))):
249 245 anode = bnode
250 246 else:
251 247 tagtypes[name] = tagtype
252 248 ahist.extend([n for n in bhist if n not in ahist])
253 249 alltags[name] = anode, ahist
254 250
255 251 def _filename(repo):
256 252 """name of a tagcache file for a given repo or repoview"""
257 253 filename = 'cache/tags2'
258 254 if repo.filtername:
259 255 filename = '%s-%s' % (filename, repo.filtername)
260 256 return filename
261 257
262 258 def _readtagcache(ui, repo):
263 259 '''Read the tag cache.
264 260
265 261 Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).
266 262
267 263 If the cache is completely up-to-date, "cachetags" is a dict of the
268 264 form returned by _readtags() and "heads", "fnodes", and "validinfo" are
269 265 None and "shouldwrite" is False.
270 266
271 267 If the cache is not up to date, "cachetags" is None. "heads" is a list
272 268 of all heads currently in the repository, ordered from tip to oldest.
273 269 "validinfo" is a tuple describing cache validation info. This is used
274 270 when writing the tags cache. "fnodes" is a mapping from head to .hgtags
275 271 filenode. "shouldwrite" is True.
276 272
277 273 If the cache is not up to date, the caller is responsible for reading tag
278 274 info from each returned head. (See findglobaltags().)
279 275 '''
280 276 try:
281 277 cachefile = repo.vfs(_filename(repo), 'r')
282 278 # force reading the file for static-http
283 279 cachelines = iter(cachefile)
284 280 except IOError:
285 281 cachefile = None
286 282
287 283 cacherev = None
288 284 cachenode = None
289 285 cachehash = None
290 286 if cachefile:
291 287 try:
292 288 validline = next(cachelines)
293 289 validline = validline.split()
294 290 cacherev = int(validline[0])
295 291 cachenode = bin(validline[1])
296 292 if len(validline) > 2:
297 293 cachehash = bin(validline[2])
298 294 except Exception:
299 295 # corruption of the cache, just recompute it.
300 296 pass
301 297
302 298 tipnode = repo.changelog.tip()
303 299 tiprev = len(repo.changelog) - 1
304 300
305 301 # Case 1 (common): tip is the same, so nothing has changed.
306 302 # (Unchanged tip trivially means no changesets have been added.
307 303 # But, thanks to localrepository.destroyed(), it also means none
308 304 # have been destroyed by strip or rollback.)
309 305 if (cacherev == tiprev
310 306 and cachenode == tipnode
311 307 and cachehash == scmutil.filteredhash(repo, tiprev)):
312 308 tags = _readtags(ui, repo, cachelines, cachefile.name)
313 309 cachefile.close()
314 310 return (None, None, None, tags, False)
315 311 if cachefile:
316 312 cachefile.close() # ignore rest of file
317 313
318 314 valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))
319 315
320 316 repoheads = repo.heads()
321 317 # Case 2 (uncommon): empty repo; get out quickly and don't bother
322 318 # writing an empty cache.
323 319 if repoheads == [nullid]:
324 320 return ([], {}, valid, {}, False)
325 321
326 322 # Case 3 (uncommon): cache file missing or empty.
327 323
328 324 # Case 4 (uncommon): tip rev decreased. This should only happen
329 325 # when we're called from localrepository.destroyed(). Refresh the
330 326 # cache so future invocations will not see disappeared heads in the
331 327 # cache.
332 328
333 329 # Case 5 (common): tip has changed, so we've added/replaced heads.
334 330
335 331 # As it happens, the code to handle cases 3, 4, 5 is the same.
336 332
337 333 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
338 334 # exposed".
339 335 if not len(repo.file('.hgtags')):
340 336 # No tags have ever been committed, so we can avoid a
341 337 # potentially expensive search.
342 338 return ([], {}, valid, None, True)
343 339
344 340
345 341 # Now we have to lookup the .hgtags filenode for every new head.
346 342 # This is the most expensive part of finding tags, so performance
347 343 # depends primarily on the size of newheads. Worst case: no cache
348 344 # file, so newheads == repoheads.
349 345 cachefnode = _getfnodes(ui, repo, repoheads)
350 346
351 347 # Caller has to iterate over all heads, but can use the filenodes in
352 348 # cachefnode to get to each .hgtags revision quickly.
353 349 return (repoheads, cachefnode, valid, None, True)
354 350
355 351 def _getfnodes(ui, repo, nodes):
356 352 """return .hgtags fnodes for a list of changeset nodes
357 353
358 354 Return value is a {node: fnode} mapping. There will be no entry for nodes
359 355 without a '.hgtags' file.
360 356 """
361 357 starttime = util.timer()
362 358 fnodescache = hgtagsfnodescache(repo.unfiltered())
363 359 cachefnode = {}
364 360 for head in reversed(nodes):
365 361 fnode = fnodescache.getfnode(head)
366 362 if fnode != nullid:
367 363 cachefnode[head] = fnode
368 364
369 365 fnodescache.write()
370 366
371 367 duration = util.timer() - starttime
372 368 ui.log('tagscache',
373 369 '%d/%d cache hits/lookups in %0.4f '
374 370 'seconds\n',
375 371 fnodescache.hitcount, fnodescache.lookupcount, duration)
376 372 return cachefnode
377 373
378 374 def _writetagcache(ui, repo, valid, cachetags):
379 375 filename = _filename(repo)
380 376 try:
381 377 cachefile = repo.vfs(filename, 'w', atomictemp=True)
382 378 except (OSError, IOError):
383 379 return
384 380
385 381 ui.log('tagscache', 'writing .hg/%s with %d tags\n',
386 382 filename, len(cachetags))
387 383
388 384 if valid[2]:
389 385 cachefile.write('%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2])))
390 386 else:
391 387 cachefile.write('%d %s\n' % (valid[0], hex(valid[1])))
392 388
393 389 # Tag names in the cache are in UTF-8 -- which is the whole reason
394 390 # we keep them in UTF-8 throughout this module. If we converted
395 391 # them to the local encoding on input, we would lose info writing them to
396 392 # the cache.
397 393 for (name, (node, hist)) in sorted(cachetags.iteritems()):
398 394 for n in hist:
399 395 cachefile.write("%s %s\n" % (hex(n), name))
400 396 cachefile.write("%s %s\n" % (hex(node), name))
401 397
402 398 try:
403 399 cachefile.close()
404 400 except (OSError, IOError):
405 401 pass
406 402
407 403 def tag(repo, names, node, message, local, user, date, editor=False):
408 404 '''tag a revision with one or more symbolic names.
409 405
410 406 names is a list of strings or, when adding a single tag, names may be a
411 407 string.
412 408
413 409 if local is True, the tags are stored in a per-repository file.
414 410 otherwise, they are stored in the .hgtags file, and a new
415 411 changeset is committed with the change.
416 412
417 413 keyword arguments:
418 414
419 415 local: whether to store tags in non-version-controlled file
420 416 (default False)
421 417
422 418 message: commit message to use if committing
423 419
424 420 user: name of user to use if committing
425 421
426 422 date: date tuple to use if committing'''
427 423
428 424 if not local:
429 425 m = matchmod.exact(repo.root, '', ['.hgtags'])
430 426 if any(repo.status(match=m, unknown=True, ignored=True)):
431 427 raise error.Abort(_('working copy of .hgtags is changed'),
432 428 hint=_('please commit .hgtags manually'))
433 429
434 430 repo.tags() # instantiate the cache
435 431 _tag(repo.unfiltered(), names, node, message, local, user, date,
436 432 editor=editor)
437 433
438 434 def _tag(repo, names, node, message, local, user, date, extra=None,
439 435 editor=False):
440 436 if isinstance(names, str):
441 437 names = (names,)
442 438
443 439 branches = repo.branchmap()
444 440 for name in names:
445 441 repo.hook('pretag', throw=True, node=hex(node), tag=name,
446 442 local=local)
447 443 if name in branches:
448 444 repo.ui.warn(_("warning: tag %s conflicts with existing"
449 445 " branch name\n") % name)
450 446
451 447 def writetags(fp, names, munge, prevtags):
452 448 fp.seek(0, 2)
453 449 if prevtags and prevtags[-1] != '\n':
454 450 fp.write('\n')
455 451 for name in names:
456 452 if munge:
457 453 m = munge(name)
458 454 else:
459 455 m = name
460 456
461 457 if (repo._tagscache.tagtypes and
462 458 name in repo._tagscache.tagtypes):
463 459 old = repo.tags().get(name, nullid)
464 460 fp.write('%s %s\n' % (hex(old), m))
465 461 fp.write('%s %s\n' % (hex(node), m))
466 462 fp.close()
467 463
468 464 prevtags = ''
469 465 if local:
470 466 try:
471 467 fp = repo.vfs('localtags', 'r+')
472 468 except IOError:
473 469 fp = repo.vfs('localtags', 'a')
474 470 else:
475 471 prevtags = fp.read()
476 472
477 473 # local tags are stored in the current charset
478 474 writetags(fp, names, None, prevtags)
479 475 for name in names:
480 476 repo.hook('tag', node=hex(node), tag=name, local=local)
481 477 return
482 478
483 479 try:
484 480 fp = repo.wvfs('.hgtags', 'rb+')
485 481 except IOError as e:
486 482 if e.errno != errno.ENOENT:
487 483 raise
488 484 fp = repo.wvfs('.hgtags', 'ab')
489 485 else:
490 486 prevtags = fp.read()
491 487
492 488 # committed tags are stored in UTF-8
493 489 writetags(fp, names, encoding.fromlocal, prevtags)
494 490
495 491 fp.close()
496 492
497 493 repo.invalidatecaches()
498 494
499 495 if '.hgtags' not in repo.dirstate:
500 496 repo[None].add(['.hgtags'])
501 497
502 498 m = matchmod.exact(repo.root, '', ['.hgtags'])
503 499 tagnode = repo.commit(message, user, date, extra=extra, match=m,
504 500 editor=editor)
505 501
506 502 for name in names:
507 503 repo.hook('tag', node=hex(node), tag=name, local=local)
508 504
509 505 return tagnode
510 506
511 507 _fnodescachefile = 'cache/hgtagsfnodes1'
512 508 _fnodesrecsize = 4 + 20 # changeset fragment + filenode
513 509 _fnodesmissingrec = '\xff' * 24
514 510
515 511 class hgtagsfnodescache(object):
516 512 """Persistent cache mapping revisions to .hgtags filenodes.
517 513
518 514 The cache is an array of records. Each item in the array corresponds to
519 515 a changelog revision. Values in the array contain the first 4 bytes of
520 516 the node hash and the 20 bytes .hgtags filenode for that revision.
521 517
522 518 The first 4 bytes are present as a form of verification. Repository
523 519 stripping and rewriting may change the node at a numeric revision in the
524 520 changelog. The changeset fragment serves as a verifier to detect
525 521 rewriting. This logic is shared with the rev branch cache (see
526 522 branchmap.py).
527 523
528 524 The instance holds in memory the full cache content but entries are
529 525 only parsed on read.
530 526
531 527 Instances behave like lists. ``c[i]`` works where i is a rev or
532 528 changeset node. Missing indexes are populated automatically on access.
533 529 """
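    # Record layout sketch, mirroring _fnodesrecsize above (illustrative
    # comments only, not part of this changeset):
    #
    #   offset = rev * _fnodesrecsize           # 24 bytes per revision
    #   record = raw[offset:offset + 24]
    #   record[0:4]  -> first 4 bytes of the changeset node (verifier)
    #   record[4:24] -> 20-byte .hgtags filenode for that revision
    #   '\xff' * 24  -> entry not computed yet (_fnodesmissingrec)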
534 530 def __init__(self, repo):
535 531 assert repo.filtername is None
536 532
537 533 self._repo = repo
538 534
539 535 # Only for reporting purposes.
540 536 self.lookupcount = 0
541 537 self.hitcount = 0
542 538
543 539
544 540 try:
545 541 data = repo.vfs.read(_fnodescachefile)
546 542 except (OSError, IOError):
547 543 data = ""
548 544 self._raw = bytearray(data)
549 545
550 546 # The end state of self._raw is an array that is of the exact length
551 547 # required to hold a record for every revision in the repository.
552 548 # We truncate or extend the array as necessary. self._dirtyoffset is
553 549 # defined to be the start offset at which we need to write the output
554 550 # file. This offset is also adjusted when new entries are calculated
555 551 # for array members.
556 552 cllen = len(repo.changelog)
557 553 wantedlen = cllen * _fnodesrecsize
558 554 rawlen = len(self._raw)
559 555
560 556 self._dirtyoffset = None
561 557
562 558 if rawlen < wantedlen:
563 559 self._dirtyoffset = rawlen
564 560 self._raw.extend('\xff' * (wantedlen - rawlen))
565 561 elif rawlen > wantedlen:
566 562 # There's no easy way to truncate array instances. This seems
567 563 # slightly less evil than copying a potentially large array slice.
568 564 for i in range(rawlen - wantedlen):
569 565 self._raw.pop()
570 566 self._dirtyoffset = len(self._raw)
571 567
572 568 def getfnode(self, node, computemissing=True):
573 569 """Obtain the filenode of the .hgtags file at a specified revision.
574 570
575 571 If the value is in the cache, the entry will be validated and returned.
576 572 Otherwise, the filenode will be computed and returned unless
577 573 "computemissing" is False, in which case None will be returned without
578 574 any potentially expensive computation being performed.
579 575
580 576 If an .hgtags does not exist at the specified revision, nullid is
581 577 returned.
582 578 """
583 579 ctx = self._repo[node]
584 580 rev = ctx.rev()
585 581
586 582 self.lookupcount += 1
587 583
588 584 offset = rev * _fnodesrecsize
589 585 record = '%s' % self._raw[offset:offset + _fnodesrecsize]
590 586 properprefix = node[0:4]
591 587
592 588 # Validate and return existing entry.
593 589 if record != _fnodesmissingrec:
594 590 fileprefix = record[0:4]
595 591
596 592 if fileprefix == properprefix:
597 593 self.hitcount += 1
598 594 return record[4:]
599 595
600 596 # Fall through.
601 597
602 598 # If we get here, the entry is either missing or invalid.
603 599
604 600 if not computemissing:
605 601 return None
606 602
607 603 # Populate missing entry.
608 604 try:
609 605 fnode = ctx.filenode('.hgtags')
610 606 except error.LookupError:
611 607 # No .hgtags file on this revision.
612 608 fnode = nullid
613 609
614 610 self._writeentry(offset, properprefix, fnode)
615 611 return fnode
616 612
617 613 def setfnode(self, node, fnode):
618 614 """Set the .hgtags filenode for a given changeset."""
619 615 assert len(fnode) == 20
620 616 ctx = self._repo[node]
621 617
622 618 # Do a lookup first to avoid writing if nothing has changed.
623 619 if self.getfnode(ctx.node(), computemissing=False) == fnode:
624 620 return
625 621
626 622 self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)
627 623
628 624 def _writeentry(self, offset, prefix, fnode):
629 625 # Slices on array instances only accept other array.
630 626 entry = bytearray(prefix + fnode)
631 627 self._raw[offset:offset + _fnodesrecsize] = entry
632 628 # self._dirtyoffset could be None.
633 629 self._dirtyoffset = min(self._dirtyoffset, offset) or 0
634 630
635 631 def write(self):
636 632 """Perform all necessary writes to cache file.
637 633
638 634 This may no-op if no writes are needed or if a write lock could
639 635 not be obtained.
640 636 """
641 637 if self._dirtyoffset is None:
642 638 return
643 639
644 640 data = self._raw[self._dirtyoffset:]
645 641 if not data:
646 642 return
647 643
648 644 repo = self._repo
649 645
650 646 try:
651 647 lock = repo.wlock(wait=False)
652 648 except error.LockError:
653 649 repo.ui.log('tagscache',
654 650 'not writing .hg/%s because lock cannot be acquired\n' %
655 651 (_fnodescachefile))
656 652 return
657 653
658 654 try:
659 655 f = repo.vfs.open(_fnodescachefile, 'ab')
660 656 try:
661 657 # if the file has been truncated
662 658 actualoffset = f.tell()
663 659 if actualoffset < self._dirtyoffset:
664 660 self._dirtyoffset = actualoffset
665 661 data = self._raw[self._dirtyoffset:]
666 662 f.seek(self._dirtyoffset)
667 663 f.truncate()
668 664 repo.ui.log('tagscache',
669 665 'writing %d bytes to %s\n' % (
670 666 len(data), _fnodescachefile))
671 667 f.write(data)
672 668 self._dirtyoffset = None
673 669 finally:
674 670 f.close()
675 671 except (IOError, OSError) as inst:
676 672 repo.ui.log('tagscache',
677 673 "couldn't write %s: %s\n" % (
678 674 _fnodescachefile, inst))
679 675 finally:
680 676 lock.release()