##// END OF EJS Templates
track-tags: introduce first bits of tags tracking during transaction...
Pierre-Yves David -
r31994:b36318e6 default
parent child Browse files
Show More
@@ -1,1986 +1,2032 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
import errno
import functools
import hashlib
import inspect
import os
import random
import time
import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 color,
32 32 context,
33 33 dirstate,
34 34 dirstateguard,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repoview,
54 54 revset,
55 55 revsetlang,
56 56 scmutil,
57 57 store,
58 58 subrepo,
59 59 tags as tagsmod,
60 60 transaction,
61 61 txnutil,
62 62 util,
63 63 vfs as vfsmod,
64 64 )
65 65
# Convenience aliases: lock release helper from lockmod, and the
# urllib error/request compatibility shims re-exported by util.
release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq
69 69
class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def join(self, obj, fname):
        # resolve the cached file relative to the repository's .hg directory
        return obj.vfs.join(fname)
    def __get__(self, repo, type=None):
        if repo is None:
            # accessed on the class itself: return the descriptor object
            return self
        # always read the cache through the unfiltered repo so that every
        # filtered view shares a single cache entry
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
84 84
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # store files live under .hg/store (sjoin), not .hg itself
        return obj.sjoin(fname)
89 89
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # already unfiltered: compute and cache the value here
            return super(unfilteredpropertycache, self).__get__(unfi)
        # filtered view: delegate to the attribute cached on the
        # unfiltered repo instead of caching a second copy
        return getattr(unfi, self.name)
98 98
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # cache on the instance the lookup went through (possibly a
        # filtered view), so each filter level keeps its own value
        object.__setattr__(obj, self.name, value)
104 104
105 105
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
109 109
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version

    The wrapped function keeps orig's metadata (``__name__``, ``__doc__``)
    via functools.wraps so introspection and profiling stay meaningful.
    """
    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        # redirect to the unfiltered repo so caches and side effects
        # land on the canonical (unfiltered) instance
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
115 115
# capabilities advertised by modern local peers; legacy peers add the
# pre-bundle 'changegroupsubset' protocol on top of these
moderncaps = set(['lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'])
legacycaps = moderncaps.union(set(['changegroupsubset']))
119 119
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        if caps is None:
            caps = moderncaps.copy()
        peer.peerrepository.__init__(self)
        # expose only the 'served' view so filtered changesets do not
        # leak through the peer interface
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # a local peer can hand out the underlying repository object
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
228 228
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # advertise the legacy capability set instead of the modern one
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
247 247
class localrepository(object):

    # requirements that affect the on-disk data format; also advertised
    # to peers via localpeer.supportedformats
    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    # every requirement this client knows how to open
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'relshared', 'dotencode'))
    # requirements forwarded to the revlog opener as options
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # name of the repoview filter in effect (None on the unfiltered repo)
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
260 260
261 261 def __init__(self, baseui, path, create=False):
262 262 self.requirements = set()
263 263 # wvfs: rooted at the repository root, used to access the working copy
264 264 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
265 265 # vfs: rooted at .hg, used to access repo files outside of .hg/store
266 266 self.vfs = None
267 267 # svfs: usually rooted at .hg/store, used to access repository history
268 268 # If this is a shared repository, this vfs may point to another
269 269 # repository's .hg/store directory.
270 270 self.svfs = None
271 271 self.root = self.wvfs.base
272 272 self.path = self.wvfs.join(".hg")
273 273 self.origroot = path
274 274 self.auditor = pathutil.pathauditor(self.root, self._checknested)
275 275 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
276 276 realfs=False)
277 277 self.vfs = vfsmod.vfs(self.path)
278 278 self.baseui = baseui
279 279 self.ui = baseui.copy()
280 280 self.ui.copy = baseui.copy # prevent copying repo configuration
281 281 # A list of callback to shape the phase if no data were found.
282 282 # Callback are in the form: func(repo, roots) --> processed root.
283 283 # This list it to be filled by extension during repo setup
284 284 self._phasedefaults = []
285 285 try:
286 286 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
287 287 self._loadextensions()
288 288 except IOError:
289 289 pass
290 290
291 291 if self.featuresetupfuncs:
292 292 self.supported = set(self._basesupported) # use private copy
293 293 extmods = set(m.__name__ for n, m
294 294 in extensions.extensions(self.ui))
295 295 for setupfunc in self.featuresetupfuncs:
296 296 if setupfunc.__module__ in extmods:
297 297 setupfunc(self.ui, self.supported)
298 298 else:
299 299 self.supported = self._basesupported
300 300 color.setup(self.ui)
301 301
302 302 # Add compression engines.
303 303 for name in util.compengines:
304 304 engine = util.compengines[name]
305 305 if engine.revlogheader():
306 306 self.supported.add('exp-compression-%s' % name)
307 307
308 308 if not self.vfs.isdir():
309 309 if create:
310 310 self.requirements = newreporequirements(self)
311 311
312 312 if not self.wvfs.exists():
313 313 self.wvfs.makedirs()
314 314 self.vfs.makedir(notindexed=True)
315 315
316 316 if 'store' in self.requirements:
317 317 self.vfs.mkdir("store")
318 318
319 319 # create an invalid changelog
320 320 self.vfs.append(
321 321 "00changelog.i",
322 322 '\0\0\0\2' # represents revlogv2
323 323 ' dummy changelog to prevent using the old repo layout'
324 324 )
325 325 else:
326 326 raise error.RepoError(_("repository %s not found") % path)
327 327 elif create:
328 328 raise error.RepoError(_("repository %s already exists") % path)
329 329 else:
330 330 try:
331 331 self.requirements = scmutil.readrequires(
332 332 self.vfs, self.supported)
333 333 except IOError as inst:
334 334 if inst.errno != errno.ENOENT:
335 335 raise
336 336
337 337 self.sharedpath = self.path
338 338 try:
339 339 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
340 340 if 'relshared' in self.requirements:
341 341 sharedpath = self.vfs.join(sharedpath)
342 342 vfs = vfsmod.vfs(sharedpath, realpath=True)
343 343 s = vfs.base
344 344 if not vfs.exists():
345 345 raise error.RepoError(
346 346 _('.hg/sharedpath points to nonexistent directory %s') % s)
347 347 self.sharedpath = s
348 348 except IOError as inst:
349 349 if inst.errno != errno.ENOENT:
350 350 raise
351 351
352 352 self.store = store.store(
353 353 self.requirements, self.sharedpath, vfsmod.vfs)
354 354 self.spath = self.store.path
355 355 self.svfs = self.store.vfs
356 356 self.sjoin = self.store.join
357 357 self.vfs.createmode = self.store.createmode
358 358 self._applyopenerreqs()
359 359 if create:
360 360 self._writerequirements()
361 361
362 362 self._dirstatevalidatewarned = False
363 363
364 364 self._branchcaches = {}
365 365 self._revbranchcache = None
366 366 self.filterpats = {}
367 367 self._datafilters = {}
368 368 self._transref = self._lockref = self._wlockref = None
369 369
370 370 # A cache for various files under .hg/ that tracks file changes,
371 371 # (used by the filecache decorator)
372 372 #
373 373 # Maps a property name to its util.filecacheentry
374 374 self._filecache = {}
375 375
376 376 # hold sets of revision to be filtered
377 377 # should be cleared when something might have changed the filter value:
378 378 # - new changesets,
379 379 # - phase change,
380 380 # - new obsolescence marker,
381 381 # - working directory parent change,
382 382 # - bookmark changes
383 383 self.filteredrevcache = {}
384 384
385 385 # generic mapping between names and nodes
386 386 self.names = namespaces.namespaces()
387 387
    @property
    def wopener(self):
        # deprecated alias kept for backward compatibility
        self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
        return self.wvfs

    @property
    def opener(self):
        # deprecated alias kept for backward compatibility
        self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
        return self.vfs
397 397
    def close(self):
        # flush in-memory caches to disk before the repo object goes away
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()
407 407
408 408 def _restrictcapabilities(self, caps):
409 409 if self.ui.configbool('experimental', 'bundle2-advertise', True):
410 410 caps = set(caps)
411 411 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
412 412 caps.add('bundle2=' + urlreq.quote(capsblob))
413 413 return caps
414 414
    def _applyopenerreqs(self):
        # translate repository requirements and format.* config knobs
        # into options consumed by the revlog opener (self.svfs)
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]
439 439
    def _writerequirements(self):
        # persist self.requirements into .hg/requires
        scmutil.writerequires(self.vfs, self.requirements)
442 442
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            # walk prefixes from longest to shortest until one is a subrepo
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    # nested deeper than one level: let the subrepo decide
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
480 480
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class filteredrepo(repoview.repoview, self.unfiltered().__class__):
            pass
        return filteredrepo(self, name)
497 497
498 498 @repofilecache('bookmarks', 'bookmarks.current')
499 499 def _bookmarks(self):
500 500 return bookmarks.bmstore(self)
501 501
502 502 @property
503 503 def _activebookmark(self):
504 504 return self._bookmarks.active
505 505
506 506 def bookmarkheads(self, bookmark):
507 507 name = bookmark.split('@', 1)[0]
508 508 heads = []
509 509 for mark, n in self._bookmarks.iteritems():
510 510 if mark.split('@', 1)[0] == name:
511 511 heads.append(n)
512 512 return heads
513 513
    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        # phase data, rebuilt when phaseroots or the changelog change on disk
        return phases.phasecache(self, self._phasedefaults)
520 520
    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # markers exist on disk but the feature is disabled: warn
            # rather than silently ignore them
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store
538 538
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if txnutil.mayhavepending(self.root):
            # pick up changesets written by an in-progress transaction
            # that shares its pending state (e.g. for hooks)
            c.readpending('00changelog.i.a')
        return c
545 545
    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        # collection interface over manifest revisions
        return manifest.manifestlog(self.svfs, self)
555 555
    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        # verify that the recorded working directory parent exists in the
        # changelog; fall back to nullid (warning once) when it does not
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid
571 571
572 572 def __getitem__(self, changeid):
573 573 if changeid is None or changeid == wdirrev:
574 574 return context.workingctx(self)
575 575 if isinstance(changeid, slice):
576 576 return [context.changectx(self, i)
577 577 for i in xrange(*changeid.indices(len(self)))
578 578 if i not in self.changelog.filteredrevs]
579 579 return context.changectx(self, changeid)
580 580
581 581 def __contains__(self, changeid):
582 582 try:
583 583 self[changeid]
584 584 return True
585 585 except error.RepoLookupError:
586 586 return False
587 587
588 588 def __nonzero__(self):
589 589 return True
590 590
591 591 __bool__ = __nonzero__
592 592
593 593 def __len__(self):
594 594 return len(self.changelog)
595 595
596 596 def __iter__(self):
597 597 return iter(self.changelog)
598 598
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        # expand any %-formatting placeholders before matching
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``.
        '''
        if user:
            # pass the ui so configured revset aliases can be expanded
            m = revset.matchany(self.ui, specs, repo=self)
        else:
            m = revset.matchany(None, specs)
        return m(self)
639 639
    def url(self):
        # local repositories are addressed with the file: scheme
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    def tag(self, names, node, message, local, user, date, editor=False):
        # deprecated entry point kept for backward compatibility
        self.ui.deprecwarn("use 'tagsmod.tag' instead of 'repo.tag'", '4.2')
        tagsmod.tag(self, names, node, message, local, user, date,
                    editor=editor)
656 656
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # filled lazily by tagslist() and nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
679 679
680 680 def tags(self):
681 681 '''return a mapping of tag to node'''
682 682 t = {}
683 683 if self.changelog.filteredrevs:
684 684 tags, tt = self._findtags()
685 685 else:
686 686 tags = self._tagscache.tags
687 687 for k, v in tags.iteritems():
688 688 try:
689 689 # ignore tags to unknown nodes
690 690 self.changelog.rev(v)
691 691 t[k] = v
692 692 except (error.LookupError, ValueError):
693 693 pass
694 694 return t
695 695
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        # merge tags from .hg/localtags into alltags/tagtypes in place
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            # entries pointing to nullid are skipped
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
730 730
731 731 def tagtype(self, tagname):
732 732 '''
733 733 return the type of the given tag. result can be:
734 734
735 735 'local' : a local tag
736 736 'global' : a global tag
737 737 None : tag does not exist
738 738 '''
739 739
740 740 return self._tagscache.tagtypes.get(tagname)
741 741
742 742 def tagslist(self):
743 743 '''return a list of tags ordered by revision'''
744 744 if not self._tagscache.tagslist:
745 745 l = []
746 746 for t, n in self.tags().iteritems():
747 747 l.append((self.changelog.rev(n), t, n))
748 748 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
749 749
750 750 return self._tagscache.tagslist
751 751
752 752 def nodetags(self, node):
753 753 '''return the tags associated with a node'''
754 754 if not self._tagscache.nodetagscache:
755 755 nodetagscache = {}
756 756 for t, n in self._tagscache.tags.iteritems():
757 757 nodetagscache.setdefault(n, []).append(t)
758 758 for tags in nodetagscache.itervalues():
759 759 tags.sort()
760 760 self._tagscache.nodetagscache = nodetagscache
761 761 return self._tagscache.nodetagscache.get(node, [])
762 762
763 763 def nodebookmarks(self, node):
764 764 """return the list of bookmarks pointing to the specified node"""
765 765 marks = []
766 766 for bookmark, n in self._bookmarks.iteritems():
767 767 if n == node:
768 768 marks.append(bookmark)
769 769 return sorted(marks)
770 770
771 771 def branchmap(self):
772 772 '''returns a dictionary {branch: [branchheads]} with branchheads
773 773 ordered by increasing revision number'''
774 774 branchmap.updatecache(self)
775 775 return self._branchcaches[self.filtername]
776 776
777 777 @unfilteredmethod
778 778 def revbranchcache(self):
779 779 if not self._revbranchcache:
780 780 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
781 781 return self._revbranchcache
782 782
783 783 def branchtip(self, branch, ignoremissing=False):
784 784 '''return the tip node for a given branch
785 785
786 786 If ignoremissing is True, then this method will not raise an error.
787 787 This is helpful for callers that only expect None for a missing branch
788 788 (e.g. namespace).
789 789
790 790 '''
791 791 try:
792 792 return self.branchmap().branchtip(branch)
793 793 except KeyError:
794 794 if not ignoremissing:
795 795 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
796 796 else:
797 797 pass
798 798
799 799 def lookup(self, key):
800 800 return self[key].node()
801 801
802 802 def lookupbranch(self, key, remote=None):
803 803 repo = remote or self
804 804 if key in repo.branchmap():
805 805 return key
806 806
807 807 repo = (remote and remote.local()) and remote or self
808 808 return repo[key].branch()
809 809
810 810 def known(self, nodes):
811 811 cl = self.changelog
812 812 nm = cl.nodemap
813 813 filtered = cl.filteredrevs
814 814 result = []
815 815 for n in nodes:
816 816 r = nm.get(n)
817 817 resp = not (r is None or r in filtered)
818 818 result.append(resp)
819 819 return result
820 820
821 821 def local(self):
822 822 return self
823 823
824 824 def publishing(self):
825 825 # it's safe (and desirable) to trust the publish flag unconditionally
826 826 # so that we don't finalize changes shared between users via ssh or nfs
827 827 return self.ui.configbool('phases', 'publish', True, untrusted=True)
828 828
829 829 def cancopy(self):
830 830 # so statichttprepo's override of local() works
831 831 if not self.local():
832 832 return False
833 833 if not self.publishing():
834 834 return True
835 835 # if publishing we can't copy if there is filtered content
836 836 return not self.filtered('visible').changelog.filteredrevs
837 837
838 838 def shared(self):
839 839 '''the type of shared repository (None if not shared)'''
840 840 if self.sharedpath != self.path:
841 841 return 'store'
842 842 return None
843 843
844 844 def join(self, f, *insidef):
845 845 self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.2')
846 846 return self.vfs.join(os.path.join(f, *insidef))
847 847
848 848 def wjoin(self, f, *insidef):
849 849 return self.vfs.reljoin(self.root, f, *insidef)
850 850
851 851 def file(self, f):
852 852 if f[0] == '/':
853 853 f = f[1:]
854 854 return filelog.filelog(self.svfs, f)
855 855
856 856 def changectx(self, changeid):
857 857 return self[changeid]
858 858
    def setparents(self, p1, p2=nullid):
        """set the working directory parents, fixing up copy records"""
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
875 875
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        # deprecated alias for calling repo.wvfs directly
        self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wfile'", '4.2')
        return self.wvfs(f, mode)

    def _link(self, f):
        # deprecated alias for repo.wvfs.islink
        self.ui.deprecwarn("use 'repo.wvfs.islink' instead of 'repo._link'",
                           '4.2')
        return self.wvfs.islink(f)
895 895
    def _loadfilter(self, filter):
        """Load and cache the filter patterns for config section *filter*.

        *filter* is 'encode' or 'decode'. Returns a list of
        (matcher, filterfn, params) triples, memoized in ``self.filterpats``.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # a command starting with a registered data-filter name uses
                # that in-process filter instead of an external command
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to running an external shell command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
919 919
920 920 def _filter(self, filterpats, filename, data):
921 921 for mf, fn, cmd in filterpats:
922 922 if mf(filename):
923 923 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
924 924 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
925 925 break
926 926
927 927 return data
928 928
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # lazily loaded, cached list of [encode] filters (see _loadfilter)
        return self._loadfilter('encode')
932 932
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # lazily loaded, cached list of [decode] filters (see _loadfilter)
        return self._loadfilter('decode')
936 936
    def adddatafilter(self, name, filter):
        """Register in-process data filter *filter* under *name*.

        Registered filters are matched against [encode]/[decode] commands in
        ``_loadfilter`` before falling back to external shell commands.
        """
        self._datafilters[name] = filter
939 939
940 940 def wread(self, filename):
941 941 if self.wvfs.islink(filename):
942 942 data = self.wvfs.readlink(filename)
943 943 else:
944 944 data = self.wvfs.read(filename)
945 945 return self._filter(self._encodefilterpats, filename, data)
946 946
947 947 def wwrite(self, filename, data, flags, backgroundclose=False):
948 948 """write ``data`` into ``filename`` in the working directory
949 949
950 950 This returns length of written (maybe decoded) data.
951 951 """
952 952 data = self._filter(self._decodefilterpats, filename, data)
953 953 if 'l' in flags:
954 954 self.wvfs.symlink(data, filename)
955 955 else:
956 956 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
957 957 if 'x' in flags:
958 958 self.wvfs.setflags(filename, False, True)
959 959 return len(data)
960 960
    def wwritedata(self, filename, data):
        """Return *data* run through the decode filters for *filename*,
        without writing anything to disk."""
        return self._filter(self._decodefilterpats, filename, data)
963 963
964 964 def currenttransaction(self):
965 965 """return the current transaction or None if non exists"""
966 966 if self._transref:
967 967 tr = self._transref()
968 968 else:
969 969 tr = None
970 970
971 971 if tr and tr.running():
972 972 return tr
973 973 return None
974 974
    def transaction(self, desc, report=None):
        """Open (or nest into) a transaction described by *desc*.

        Returns the transaction object; *report* overrides the default
        warning sink used for rollback messages.
        """
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            # a transaction is already running; join it
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
                                             False)
        if desc != 'strip' and shouldtracktags:
            # capture the pre-transaction heads now; tracktags compares
            # against them when the transaction closes
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once building a set would not be cheaper
                if oldfnodes != newfnodes:
                    tr2.hookargs['tag_moved'] = '1'
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hooks since the 'tr.hookargs'
            # dict is copied before these run. In addition we needs the data
            # available to in memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
1062 1108
1063 1109 def _journalfiles(self):
1064 1110 return ((self.svfs, 'journal'),
1065 1111 (self.vfs, 'journal.dirstate'),
1066 1112 (self.vfs, 'journal.branch'),
1067 1113 (self.vfs, 'journal.desc'),
1068 1114 (self.vfs, 'journal.bookmarks'),
1069 1115 (self.svfs, 'journal.phaseroots'))
1070 1116
1071 1117 def undofiles(self):
1072 1118 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1073 1119
    def _writejournal(self, desc):
        """Snapshot non-store repository state into 'journal.*' files so an
        interrupted transaction can be rolled back.

        *desc* is the human-readable transaction description recorded in
        journal.desc along with the current changelog length.
        """
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
1084 1130
    def recover(self):
        """Roll back an interrupted transaction if one is found.

        Returns True if a journal was present and rolled back, False
        otherwise. Takes the store lock for the duration.
        """
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
1098 1144
    def rollback(self, dryrun=False, force=False):
        """Undo the last committed transaction using the 'undo' files.

        Returns the value of ``_rollback`` (0 on success) or 1 when no
        rollback information exists. Acquires wlock then lock, and guards
        the dirstate so a failed rollback does not corrupt it.
        """
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
1113 1159
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Implementation of ``rollback``; callers hold wlock and lock.

        Restores the store from the 'undo.*' files, then — if the dirstate
        parents were stripped — restores dirstate, branch and merge state.
        Returns 0 on success (including dry runs).
        """
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # undo.desc is missing or unreadable; proceed without details
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        # did the rollback strip one of the working directory parents?
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1184 1230
    def invalidatecaches(self):
        """Drop in-memory caches derived from repository history
        (tags cache, branch caches, volatile sets)."""

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
1193 1239
    def invalidatevolatilesets(self):
        """Drop caches that depend on volatile data (repoview filtering
        and obsolescence markers)."""
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1197 1243
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # drop the dirstate's own cached file contents first, then the
            # cached dirstate object itself so it is rebuilt on next access
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')
1214 1260
    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                # drop the filecache entry entirely, not just the loaded value
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                # the property was never loaded in this process; nothing to do
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()
1241 1287
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()
1248 1294
1249 1295 @unfilteredmethod
1250 1296 def _refreshfilecachestats(self, tr):
1251 1297 """Reload stats of cached files so that they are flagged as valid"""
1252 1298 for k, ce in self._filecache.items():
1253 1299 if k == 'dirstate' or k not in self.__dict__:
1254 1300 continue
1255 1301 ce.refresh()
1256 1302
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        """Acquire the lock file *lockname* under *vfs* and return it.

        First tries a non-blocking acquisition; if the lock is held and
        *wait* is true, warns about the current holder and retries with a
        timeout (ui.timeout, default 600 seconds). Raises error.LockHeld
        when *wait* is false and the lock is busy.
        """
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
1288 1334
1289 1335 def _afterlock(self, callback):
1290 1336 """add a callback to be run when the repository is fully unlocked
1291 1337
1292 1338 The callback will be executed when the outermost lock is released
1293 1339 (with wlock being higher level than 'lock')."""
1294 1340 for ref in (self._wlockref, self._lockref):
1295 1341 l = ref and ref()
1296 1342 if l and l.held:
1297 1343 l.postrelease.append(callback)
1298 1344 break
1299 1345 else: # no lock have been found.
1300 1346 callback()
1301 1347
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            # lock is already held in this process; reenter it (recursive)
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1318 1364
1319 1365 def _wlockchecktransaction(self):
1320 1366 if self.currenttransaction() is not None:
1321 1367 raise error.LockInheritanceContractViolation(
1322 1368 'wlock cannot be inherited in the middle of a transaction')
1323 1369
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # already held in this process; reenter (locks are recursive)
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # on release, persist or discard pending dirstate changes
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
1359 1405
1360 1406 def _currentlock(self, lockref):
1361 1407 """Returns the lock if it's held, or None if it's not."""
1362 1408 if lockref is None:
1363 1409 return None
1364 1410 l = lockref()
1365 1411 if l is None or not l.held:
1366 1412 return None
1367 1413 return l
1368 1414
1369 1415 def currentwlock(self):
1370 1416 """Returns the wlock if it's held, or None if it's not."""
1371 1417 return self._currentlock(self._wlockref)
1372 1418
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        ``fctx`` is the file context to commit; ``manifest1``/``manifest2``
        are the parents' manifests. Appends the filename to ``changelist``
        when a new filelog revision is created (or flags changed) and
        returns the resulting file node.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # the file already has a node; reuse it if it matches a parent
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1460 1506
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable

        For exact/prefix matchers, every explicitly named file must be
        modified/added/removed, a subrepo, or a visited directory containing
        matches; otherwise *fail* is called with an explanatory message.
        """
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        # directory was named but nothing under it matched
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))
1481 1527
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing to
        commit (and empty commits are not allowed).
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            # refuse to commit with unresolved merge conflicts
            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    # tell the user where the message was saved so it can be
                    # reused after the failure
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
1643 1689
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ``ctx`` supplies parents, manifest, file contents, user, date and
        extra metadata.  If ``error`` is True, any IOError while committing
        a file aborts the commit; otherwise a missing file (ENOENT) is
        tolerated and the file is simply dropped.

        Returns the node id of the new changeset.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # weak proxy so objects we hand the transaction to do not keep
            # it alive past tr.release() below — presumably to avoid
            # reference cycles; TODO confirm
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                # start from a copy of p1's manifest and apply the changes
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                # the new changeset will be revision len(self)
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            # context reports the file as gone: treat as removal
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        # ENOENT is tolerated unless the caller asked for
                        # strict error behavior
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                # only report files removed that actually existed in a parent
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                # no file-level changes: reuse p1's manifest untouched
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            # hooks may still veto the commit at this point (throw=True)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            # refresh the branch cache for read-only consumers
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1737 1783
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            # vars() check avoids instantiating the lazy cache just to
            # write it out; only flush if it was actually loaded
            self._phasecache.write()
1755 1801
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1789 1835
1790 1836 def walk(self, match, node=None):
1791 1837 '''
1792 1838 walk recursively through the directory tree or a given
1793 1839 changeset, finding all files matched by the match
1794 1840 function
1795 1841 '''
1796 1842 return self[node].walk(match)
1797 1843
1798 1844 def status(self, node1='.', node2=None, match=None,
1799 1845 ignored=False, clean=False, unknown=False,
1800 1846 listsubrepos=False):
1801 1847 '''a convenience method that calls node1.status(node2)'''
1802 1848 return self[node1].status(node2, match, ignored, clean, unknown,
1803 1849 listsubrepos)
1804 1850
1805 1851 def heads(self, start=None):
1806 1852 if start is None:
1807 1853 cl = self.changelog
1808 1854 headrevs = reversed(cl.headrevs())
1809 1855 return [cl.node(rev) for rev in headrevs]
1810 1856
1811 1857 heads = self.changelog.heads(start)
1812 1858 # sort the output in rev descending order
1813 1859 return sorted(heads, key=self.changelog.rev, reverse=True)
1814 1860
1815 1861 def branchheads(self, branch=None, start=None, closed=False):
1816 1862 '''return a (possibly filtered) list of heads for the given branch
1817 1863
1818 1864 Heads are returned in topological order, from newest to oldest.
1819 1865 If branch is None, use the dirstate branch.
1820 1866 If start is not None, return only heads reachable from start.
1821 1867 If closed is True, return heads that are marked as closed as well.
1822 1868 '''
1823 1869 if branch is None:
1824 1870 branch = self[None].branch()
1825 1871 branches = self.branchmap()
1826 1872 if branch not in branches:
1827 1873 return []
1828 1874 # the cache returns heads ordered lowest to highest
1829 1875 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1830 1876 if start is not None:
1831 1877 # filter out the heads that cannot be reached from startrev
1832 1878 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1833 1879 bheads = [h for h in bheads if h in fbheads]
1834 1880 return bheads
1835 1881
1836 1882 def branches(self, nodes):
1837 1883 if not nodes:
1838 1884 nodes = [self.changelog.tip()]
1839 1885 b = []
1840 1886 for n in nodes:
1841 1887 t = n
1842 1888 while True:
1843 1889 p = self.changelog.parents(n)
1844 1890 if p[1] != nullid or p[0] == nullid:
1845 1891 b.append((t, n, p[0], p[1]))
1846 1892 break
1847 1893 n = p[0]
1848 1894 return b
1849 1895
1850 1896 def between(self, pairs):
1851 1897 r = []
1852 1898
1853 1899 for top, bottom in pairs:
1854 1900 n, l, i = top, [], 0
1855 1901 f = 1
1856 1902
1857 1903 while n != bottom and n != nullid:
1858 1904 p = self.changelog.parents(n)[0]
1859 1905 if i == f:
1860 1906 l.append(n)
1861 1907 f = f * 2
1862 1908 n = p
1863 1909 i += 1
1864 1910
1865 1911 r.append(l)
1866 1912
1867 1913 return r
1868 1914
1869 1915 def checkpush(self, pushop):
1870 1916 """Extensions can override this function if additional checks have
1871 1917 to be performed before pushing, or call it if they override push
1872 1918 command.
1873 1919 """
1874 1920 pass
1875 1921
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        # NOTE(review): created lazily; unfilteredpropertycache presumably
        # memoizes the container on the repo so all callers share it
        return util.hooks()
1882 1928
    def pushkey(self, namespace, key, old, new):
        """Update an entry in a pushkey namespace.

        Fires the 'prepushkey' hook first; a HookAbort raised there
        vetoes the update and makes this method return False.  Otherwise
        the change is pushed through the pushkey registry and the
        'pushkey' hook is scheduled to run once the current lock is
        released.  Returns the pushkey result.
        """
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                # include the active transaction's hook arguments
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            # hook vetoed the update: report and refuse without raising
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        # defer the notification hook until the lock is released
        self._afterlock(runhook)
        return ret
1906 1952
    def listkeys(self, namespace):
        """List all entries of the given pushkey *namespace*.

        Runs the 'prelistkeys' hook (which may abort) before listing and
        the 'listkeys' hook afterwards.  Returns the values produced by
        the pushkey registry.
        """
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
1913 1959
1914 1960 def debugwireargs(self, one, two, three=None, four=None, five=None):
1915 1961 '''used to test argument passing over the wire'''
1916 1962 return "%s %s %s %s %s" % (one, two, three, four, five)
1917 1963
1918 1964 def savecommitmessage(self, text):
1919 1965 fp = self.vfs('last-message.txt', 'wb')
1920 1966 try:
1921 1967 fp.write(text)
1922 1968 finally:
1923 1969 fp.close()
1924 1970 return self.pathto(fp.name[len(self.root) + 1:])
1925 1971
# used to avoid circular references so destructors work
def aftertrans(files):
    """Build a callback renaming each (vfs, src, dest) triple in *files*.

    The returned callable performs the renames; journal files that do
    not exist yet are silently skipped.
    """
    renames = [tuple(entry) for entry in files]
    def run():
        for vfs, src, dest in renames:
            # when src and dest are the same file, vfs.rename would be a
            # no-op and leave both on disk; delete dest first so the
            # rename can never degenerate like that
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return run
1940 1986
def undoname(fn):
    """Map a journal file path to its corresponding undo file path."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    # only the leading 'journal' prefix is rewritten
    return os.path.join(directory, name.replace('journal', 'undo', 1))
1945 1991
def instance(ui, path, create):
    """Open (or create) the local repository at *path*.

    *path* may be an URL form of a local path; util.urllocalpath
    translates it to a plain filesystem path first.
    """
    return localrepository(ui, util.urllocalpath(path), create)
1948 1994
def islocal(path):
    """This repository type is always local."""
    return True
1951 1997
def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    # store-layout options, each enabled by default
    for option, requirement in [('usestore', 'store'),
                                ('usefncache', 'fncache'),
                                ('dotencode', 'dotencode')]:
        if ui.configbool('format', option, True):
            requirements.add(requirement)

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    # experimental storage formats, disabled by default
    for option in ['treemanifest', 'manifestv2']:
        if ui.configbool('experimental', option, False):
            requirements.add(option)

    return requirements
@@ -1,646 +1,696 b''
1 $ cat >> $HGRCPATH << EOF
2 > [experimental]
3 > hook-track-tags=1
4 > [hooks]
5 > txnclose.track-tag=${TESTTMP}/taghook.sh
6 > EOF
7
8 $ cat << EOF > taghook.sh
9 > #!/bin/sh
10 > # escape the "$" otherwise the test runner interprets it when writing the
11 > # file...
12 > if [ -n "\$HG_TAG_MOVED" ]; then
13 > echo 'hook: tag changes detected'
14 > fi
15 > EOF
16 $ chmod +x taghook.sh
1 17 $ hg init test
2 18 $ cd test
3 19
4 20 $ echo a > a
5 21 $ hg add a
6 22 $ hg commit -m "test"
7 23 $ hg history
8 24 changeset: 0:acb14030fe0a
9 25 tag: tip
10 26 user: test
11 27 date: Thu Jan 01 00:00:00 1970 +0000
12 28 summary: test
13 29
14 30
15 31 $ hg tag ' '
16 32 abort: tag names cannot consist entirely of whitespace
17 33 [255]
18 34
19 35 (this tests also that editor is not invoked, if '--edit' is not
20 36 specified)
21 37
22 38 $ HGEDITOR=cat hg tag "bleah"
39 hook: tag changes detected
23 40 $ hg history
24 41 changeset: 1:d4f0d2909abc
25 42 tag: tip
26 43 user: test
27 44 date: Thu Jan 01 00:00:00 1970 +0000
28 45 summary: Added tag bleah for changeset acb14030fe0a
29 46
30 47 changeset: 0:acb14030fe0a
31 48 tag: bleah
32 49 user: test
33 50 date: Thu Jan 01 00:00:00 1970 +0000
34 51 summary: test
35 52
36 53
37 54 $ echo foo >> .hgtags
38 55 $ hg tag "bleah2"
39 56 abort: working copy of .hgtags is changed
40 57 (please commit .hgtags manually)
41 58 [255]
42 59
43 60 $ hg revert .hgtags
44 61 $ hg tag -r 0 x y z y y z
45 62 abort: tag names must be unique
46 63 [255]
47 64 $ hg tag tap nada dot tip
48 65 abort: the name 'tip' is reserved
49 66 [255]
50 67 $ hg tag .
51 68 abort: the name '.' is reserved
52 69 [255]
53 70 $ hg tag null
54 71 abort: the name 'null' is reserved
55 72 [255]
56 73 $ hg tag "bleah"
57 74 abort: tag 'bleah' already exists (use -f to force)
58 75 [255]
59 76 $ hg tag "blecch" "bleah"
60 77 abort: tag 'bleah' already exists (use -f to force)
61 78 [255]
62 79
63 80 $ hg tag --remove "blecch"
64 81 abort: tag 'blecch' does not exist
65 82 [255]
66 83 $ hg tag --remove "bleah" "blecch" "blough"
67 84 abort: tag 'blecch' does not exist
68 85 [255]
69 86
70 87 $ hg tag -r 0 "bleah0"
88 hook: tag changes detected
71 89 $ hg tag -l -r 1 "bleah1"
72 90 $ hg tag gack gawk gorp
91 hook: tag changes detected
73 92 $ hg tag -f gack
93 hook: tag changes detected
74 94 $ hg tag --remove gack gorp
95 hook: tag changes detected
75 96
76 97 $ hg tag "bleah "
77 98 abort: tag 'bleah' already exists (use -f to force)
78 99 [255]
79 100 $ hg tag " bleah"
80 101 abort: tag 'bleah' already exists (use -f to force)
81 102 [255]
82 103 $ hg tag " bleah"
83 104 abort: tag 'bleah' already exists (use -f to force)
84 105 [255]
85 106 $ hg tag -r 0 " bleahbleah "
107 hook: tag changes detected
86 108 $ hg tag -r 0 " bleah bleah "
109 hook: tag changes detected
87 110
88 111 $ cat .hgtags
89 112 acb14030fe0a21b60322c440ad2d20cf7685a376 bleah
90 113 acb14030fe0a21b60322c440ad2d20cf7685a376 bleah0
91 114 336fccc858a4eb69609a291105009e484a6b6b8d gack
92 115 336fccc858a4eb69609a291105009e484a6b6b8d gawk
93 116 336fccc858a4eb69609a291105009e484a6b6b8d gorp
94 117 336fccc858a4eb69609a291105009e484a6b6b8d gack
95 118 799667b6f2d9b957f73fa644a918c2df22bab58f gack
96 119 799667b6f2d9b957f73fa644a918c2df22bab58f gack
97 120 0000000000000000000000000000000000000000 gack
98 121 336fccc858a4eb69609a291105009e484a6b6b8d gorp
99 122 0000000000000000000000000000000000000000 gorp
100 123 acb14030fe0a21b60322c440ad2d20cf7685a376 bleahbleah
101 124 acb14030fe0a21b60322c440ad2d20cf7685a376 bleah bleah
102 125
103 126 $ cat .hg/localtags
104 127 d4f0d2909abc9290e2773c08837d70c1794e3f5a bleah1
105 128
106 129 tagging on a non-head revision
107 130
108 131 $ hg update 0
109 132 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
110 133 $ hg tag -l localblah
111 134 $ hg tag "foobar"
112 135 abort: working directory is not at a branch head (use -f to force)
113 136 [255]
114 137 $ hg tag -f "foobar"
138 hook: tag changes detected
115 139 $ cat .hgtags
116 140 acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
117 141 $ cat .hg/localtags
118 142 d4f0d2909abc9290e2773c08837d70c1794e3f5a bleah1
119 143 acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
120 144
121 145 $ hg tag -l 'xx
122 146 > newline'
123 147 abort: '\n' cannot be used in a name
124 148 [255]
125 149 $ hg tag -l 'xx:xx'
126 150 abort: ':' cannot be used in a name
127 151 [255]
128 152
129 153 cloning local tags
130 154
131 155 $ cd ..
132 156 $ hg -R test log -r0:5
133 157 changeset: 0:acb14030fe0a
134 158 tag: bleah
135 159 tag: bleah bleah
136 160 tag: bleah0
137 161 tag: bleahbleah
138 162 tag: foobar
139 163 tag: localblah
140 164 user: test
141 165 date: Thu Jan 01 00:00:00 1970 +0000
142 166 summary: test
143 167
144 168 changeset: 1:d4f0d2909abc
145 169 tag: bleah1
146 170 user: test
147 171 date: Thu Jan 01 00:00:00 1970 +0000
148 172 summary: Added tag bleah for changeset acb14030fe0a
149 173
150 174 changeset: 2:336fccc858a4
151 175 tag: gawk
152 176 user: test
153 177 date: Thu Jan 01 00:00:00 1970 +0000
154 178 summary: Added tag bleah0 for changeset acb14030fe0a
155 179
156 180 changeset: 3:799667b6f2d9
157 181 user: test
158 182 date: Thu Jan 01 00:00:00 1970 +0000
159 183 summary: Added tag gack, gawk, gorp for changeset 336fccc858a4
160 184
161 185 changeset: 4:154eeb7c0138
162 186 user: test
163 187 date: Thu Jan 01 00:00:00 1970 +0000
164 188 summary: Added tag gack for changeset 799667b6f2d9
165 189
166 190 changeset: 5:b4bb47aaff09
167 191 user: test
168 192 date: Thu Jan 01 00:00:00 1970 +0000
169 193 summary: Removed tag gack, gorp
170 194
171 195 $ hg clone -q -rbleah1 test test1
196 hook: tag changes detected
172 197 $ hg -R test1 parents --style=compact
173 198 1[tip] d4f0d2909abc 1970-01-01 00:00 +0000 test
174 199 Added tag bleah for changeset acb14030fe0a
175 200
176 201 $ hg clone -q -r5 test#bleah1 test2
202 hook: tag changes detected
177 203 $ hg -R test2 parents --style=compact
178 204 5[tip] b4bb47aaff09 1970-01-01 00:00 +0000 test
179 205 Removed tag gack, gorp
180 206
181 207 $ hg clone -q -U test#bleah1 test3
208 hook: tag changes detected
182 209 $ hg -R test3 parents --style=compact
183 210
184 211 $ cd test
185 212
186 213 Issue601: hg tag doesn't do the right thing if .hgtags or localtags
187 214 doesn't end with EOL
188 215
189 216 $ python << EOF
190 217 > f = file('.hg/localtags'); last = f.readlines()[-1][:-1]; f.close()
191 218 > f = file('.hg/localtags', 'w'); f.write(last); f.close()
192 219 > EOF
193 220 $ cat .hg/localtags; echo
194 221 acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
195 222 $ hg tag -l localnewline
196 223 $ cat .hg/localtags; echo
197 224 acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
198 225 c2899151f4e76890c602a2597a650a72666681bf localnewline
199 226
200 227
201 228 $ python << EOF
202 229 > f = file('.hgtags'); last = f.readlines()[-1][:-1]; f.close()
203 230 > f = file('.hgtags', 'w'); f.write(last); f.close()
204 231 > EOF
205 232 $ hg ci -m'broken manual edit of .hgtags'
233 hook: tag changes detected
206 234 $ cat .hgtags; echo
207 235 acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
208 236 $ hg tag newline
237 hook: tag changes detected
209 238 $ cat .hgtags; echo
210 239 acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
211 240 a0eea09de1eeec777b46f2085260a373b2fbc293 newline
212 241
213 242
214 243 tag and branch using same name
215 244
216 245 $ hg branch tag-and-branch-same-name
217 246 marked working directory as branch tag-and-branch-same-name
218 247 (branches are permanent and global, did you want a bookmark?)
219 248 $ hg ci -m"discouraged"
220 249 $ hg tag tag-and-branch-same-name
221 250 warning: tag tag-and-branch-same-name conflicts with existing branch name
251 hook: tag changes detected
222 252
223 253 test custom commit messages
224 254
225 255 $ cat > editor.sh << '__EOF__'
226 256 > echo "==== before editing"
227 257 > cat "$1"
228 258 > echo "===="
229 259 > echo "custom tag message" > "$1"
230 260 > echo "second line" >> "$1"
231 261 > __EOF__
232 262
233 263 at first, test saving last-message.txt
234 264
235 265 (test that editor is not invoked before transaction starting)
236 266
237 267 $ cat > .hg/hgrc << '__EOF__'
238 268 > [hooks]
239 269 > # this failure occurs before editor invocation
240 270 > pretag.test-saving-lastmessage = false
241 271 > __EOF__
242 272 $ rm -f .hg/last-message.txt
243 273 $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg tag custom-tag -e
244 274 abort: pretag.test-saving-lastmessage hook exited with status 1
245 275 [255]
246 276 $ test -f .hg/last-message.txt
247 277 [1]
248 278
249 279 (test that editor is invoked and commit message is saved into
250 280 "last-message.txt")
251 281
252 282 $ cat >> .hg/hgrc << '__EOF__'
253 283 > [hooks]
254 284 > pretag.test-saving-lastmessage =
255 285 > # this failure occurs after editor invocation
256 286 > pretxncommit.unexpectedabort = false
257 287 > __EOF__
258 288
259 289 (this tests also that editor is invoked, if '--edit' is specified,
260 290 regardless of '--message')
261 291
262 292 $ rm -f .hg/last-message.txt
263 293 $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg tag custom-tag -e -m "foo bar"
264 294 ==== before editing
265 295 foo bar
266 296
267 297
268 298 HG: Enter commit message. Lines beginning with 'HG:' are removed.
269 299 HG: Leave message empty to abort commit.
270 300 HG: --
271 301 HG: user: test
272 302 HG: branch 'tag-and-branch-same-name'
273 303 HG: changed .hgtags
274 304 ====
275 305 note: commit message saved in .hg/last-message.txt
276 306 transaction abort!
277 307 rollback completed
278 308 abort: pretxncommit.unexpectedabort hook exited with status 1
279 309 [255]
280 310 $ cat .hg/last-message.txt
281 311 custom tag message
282 312 second line
283 313
284 314 $ cat >> .hg/hgrc << '__EOF__'
285 315 > [hooks]
286 316 > pretxncommit.unexpectedabort =
287 317 > __EOF__
288 318 $ hg status .hgtags
289 319 M .hgtags
290 320 $ hg revert --no-backup -q .hgtags
291 321
292 322 then, test custom commit message itself
293 323
294 324 $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg tag custom-tag -e
295 325 ==== before editing
296 326 Added tag custom-tag for changeset 75a534207be6
297 327
298 328
299 329 HG: Enter commit message. Lines beginning with 'HG:' are removed.
300 330 HG: Leave message empty to abort commit.
301 331 HG: --
302 332 HG: user: test
303 333 HG: branch 'tag-and-branch-same-name'
304 334 HG: changed .hgtags
305 335 ====
336 hook: tag changes detected
306 337 $ hg log -l1 --template "{desc}\n"
307 338 custom tag message
308 339 second line
309 340
310 341
311 342 local tag with .hgtags modified
312 343
313 344 $ hg tag hgtags-modified
345 hook: tag changes detected
314 346 $ hg rollback
315 347 repository tip rolled back to revision 13 (undo commit)
316 348 working directory now based on revision 13
317 349 $ hg st
318 350 M .hgtags
319 351 ? .hgtags.orig
320 352 ? editor.sh
321 353 $ hg tag --local baz
322 354 $ hg revert --no-backup .hgtags
323 355
324 356
325 357 tagging when at named-branch-head that's not a topo-head
326 358
327 359 $ hg up default
328 360 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
329 361 $ hg merge -t internal:local
330 362 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
331 363 (branch merge, don't forget to commit)
332 364 $ hg ci -m 'merge named branch'
365 hook: tag changes detected
333 366 $ hg up 13
334 367 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
335 368 $ hg tag new-topo-head
369 hook: tag changes detected
336 370
337 371 tagging on null rev
338 372
339 373 $ hg up null
340 374 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
341 375 $ hg tag nullrev
342 376 abort: working directory is not at a branch head (use -f to force)
343 377 [255]
344 378
345 379 $ hg init empty
346 380 $ hg tag -R empty nullrev
347 381 abort: cannot tag null revision
348 382 [255]
349 383
350 384 $ hg tag -R empty -r 00000000000 -f nulltag
351 385 abort: cannot tag null revision
352 386 [255]
353 387
354 388 $ cd ..
355 389
356 390 tagging on an uncommitted merge (issue2542)
357 391
358 392 $ hg init repo-tag-uncommitted-merge
359 393 $ cd repo-tag-uncommitted-merge
360 394 $ echo c1 > f1
361 395 $ hg ci -Am0
362 396 adding f1
363 397 $ echo c2 > f2
364 398 $ hg ci -Am1
365 399 adding f2
366 400 $ hg co -q 0
367 401 $ hg branch b1
368 402 marked working directory as branch b1
369 403 (branches are permanent and global, did you want a bookmark?)
370 404 $ hg ci -m2
371 405 $ hg up default
372 406 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
373 407 $ hg merge b1
374 408 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
375 409 (branch merge, don't forget to commit)
376 410
377 411 $ hg tag t1
378 412 abort: uncommitted merge
379 413 [255]
380 414 $ hg status
381 415 $ hg tag --rev 1 t2
382 416 abort: uncommitted merge
383 417 [255]
384 418 $ hg tag --rev 1 --local t3
385 419 $ hg tags -v
386 420 tip 2:2a156e8887cc
387 421 t3 1:c3adabd1a5f4 local
388 422
389 423 $ cd ..
390 424
391 425 commit hook on tag used to be run without write lock - issue3344
392 426
393 427 $ hg init repo-tag
394 428 $ touch repo-tag/test
395 429 $ hg -R repo-tag commit -A -m "test"
396 430 adding test
397 431 $ hg init repo-tag-target
398 432 $ cat > "$TESTTMP/issue3344.sh" <<EOF
399 433 > hg push "$TESTTMP/repo-tag-target"
400 434 > EOF
401 435 $ hg -R repo-tag --config hooks.commit="sh ../issue3344.sh" tag tag
436 hook: tag changes detected
402 437 pushing to $TESTTMP/repo-tag-target (glob)
403 438 searching for changes
404 439 adding changesets
405 440 adding manifests
406 441 adding file changes
407 442 added 2 changesets with 2 changes to 2 files
443 hook: tag changes detected
408 444
409 445 automatically merge resolvable tag conflicts (i.e. tags that differ in rank)
410 446 create two clones with some different tags as well as some common tags
411 447 check that we can merge tags that differ in rank
412 448
413 449 $ hg init repo-automatic-tag-merge
414 450 $ cd repo-automatic-tag-merge
415 451 $ echo c0 > f0
416 452 $ hg ci -A -m0
417 453 adding f0
418 454 $ hg tag tbase
455 hook: tag changes detected
419 456 $ hg up -qr '.^'
420 457 $ hg log -r 'wdir()' -T "{latesttagdistance}\n"
421 458 1
422 459 $ hg up -q
423 460 $ hg log -r 'wdir()' -T "{latesttagdistance}\n"
424 461 2
425 462 $ cd ..
426 463 $ hg clone repo-automatic-tag-merge repo-automatic-tag-merge-clone
427 464 updating to branch default
428 465 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
429 466 $ cd repo-automatic-tag-merge-clone
430 467 $ echo c1 > f1
431 468 $ hg ci -A -m1
432 469 adding f1
433 470 $ hg tag t1 t2 t3
471 hook: tag changes detected
434 472 $ hg tag --remove t2
473 hook: tag changes detected
435 474 $ hg tag t5
475 hook: tag changes detected
436 476 $ echo c2 > f2
437 477 $ hg ci -A -m2
438 478 adding f2
439 479 $ hg tag -f t3
480 hook: tag changes detected
440 481
441 482 $ cd ../repo-automatic-tag-merge
442 483 $ echo c3 > f3
443 484 $ hg ci -A -m3
444 485 adding f3
445 486 $ hg tag -f t4 t5 t6
487 hook: tag changes detected
446 488
447 489 $ hg up -q '.^'
448 490 $ hg log -r 'wdir()' -T "{changessincelatesttag} changes since {latesttag}\n"
449 491 1 changes since t4:t5:t6
450 492 $ hg log -r '.' -T "{changessincelatesttag} changes since {latesttag}\n"
451 493 0 changes since t4:t5:t6
452 494 $ echo c5 > f3
453 495 $ hg log -r 'wdir()' -T "{changessincelatesttag} changes since {latesttag}\n"
454 496 1 changes since t4:t5:t6
455 497 $ hg up -qC
456 498
457 499 $ hg tag --remove t5
500 hook: tag changes detected
458 501 $ echo c4 > f4
459 502 $ hg log -r '.' -T "{changessincelatesttag} changes since {latesttag}\n"
460 503 2 changes since t4:t6
461 504 $ hg log -r '.' -T "{latesttag % '{latesttag}\n'}"
462 505 t4
463 506 t6
464 507 $ hg log -r '.' -T "{latesttag('t4') % 'T: {tag}, C: {changes}, D: {distance}\n'}"
465 508 T: t4, C: 2, D: 2
466 509 $ hg log -r '.' -T "{latesttag('re:\d') % 'T: {tag}, C: {changes}, D: {distance}\n'}"
467 510 T: t4, C: 2, D: 2
468 511 T: t6, C: 2, D: 2
469 512 $ hg log -r . -T '{join(latesttag(), "*")}\n'
470 513 t4*t6
471 514 $ hg ci -A -m4
472 515 adding f4
473 516 $ hg log -r 'wdir()' -T "{changessincelatesttag} changes since {latesttag}\n"
474 517 4 changes since t4:t6
475 518 $ hg tag t2
519 hook: tag changes detected
476 520 $ hg tag -f t6
521 hook: tag changes detected
477 522
478 523 $ cd ../repo-automatic-tag-merge-clone
479 524 $ hg pull
480 525 pulling from $TESTTMP/repo-automatic-tag-merge (glob)
481 526 searching for changes
482 527 adding changesets
483 528 adding manifests
484 529 adding file changes
485 530 added 6 changesets with 6 changes to 3 files (+1 heads)
531 hook: tag changes detected
486 532 (run 'hg heads' to see heads, 'hg merge' to merge)
487 533 $ hg merge --tool internal:tagmerge
488 534 merging .hgtags
489 535 2 files updated, 1 files merged, 0 files removed, 0 files unresolved
490 536 (branch merge, don't forget to commit)
491 537 $ hg status
492 538 M .hgtags
493 539 M f3
494 540 M f4
495 541 $ hg resolve -l
496 542 R .hgtags
497 543 $ cat .hgtags
498 544 9aa4e1292a27a248f8d07339bed9931d54907be7 t4
499 545 9aa4e1292a27a248f8d07339bed9931d54907be7 t6
500 546 9aa4e1292a27a248f8d07339bed9931d54907be7 t6
501 547 09af2ce14077a94effef208b49a718f4836d4338 t6
502 548 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
503 549 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
504 550 929bca7b18d067cbf3844c3896319a940059d748 t2
505 551 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
506 552 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
507 553 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
508 554 0000000000000000000000000000000000000000 t2
509 555 875517b4806a848f942811a315a5bce30804ae85 t5
510 556 9aa4e1292a27a248f8d07339bed9931d54907be7 t5
511 557 9aa4e1292a27a248f8d07339bed9931d54907be7 t5
512 558 0000000000000000000000000000000000000000 t5
513 559 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
514 560 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
515 561
516 562 check that the merge tried to minimize the diff with the first merge parent
517 563
518 564 $ hg diff --git -r 'p1()' .hgtags
519 565 diff --git a/.hgtags b/.hgtags
520 566 --- a/.hgtags
521 567 +++ b/.hgtags
522 568 @@ -1,9 +1,17 @@
523 569 +9aa4e1292a27a248f8d07339bed9931d54907be7 t4
524 570 +9aa4e1292a27a248f8d07339bed9931d54907be7 t6
525 571 +9aa4e1292a27a248f8d07339bed9931d54907be7 t6
526 572 +09af2ce14077a94effef208b49a718f4836d4338 t6
527 573 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
528 574 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
529 575 +929bca7b18d067cbf3844c3896319a940059d748 t2
530 576 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
531 577 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
532 578 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
533 579 0000000000000000000000000000000000000000 t2
534 580 875517b4806a848f942811a315a5bce30804ae85 t5
535 581 +9aa4e1292a27a248f8d07339bed9931d54907be7 t5
536 582 +9aa4e1292a27a248f8d07339bed9931d54907be7 t5
537 583 +0000000000000000000000000000000000000000 t5
538 584 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
539 585 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
540 586
541 587 detect merge tag conflicts
542 588
543 589 $ hg update -C -r tip
544 590 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
545 591 $ hg tag t7
592 hook: tag changes detected
546 593 $ hg update -C -r 'first(sort(head()))'
547 594 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
548 595 $ printf "%s %s\n" `hg log -r . --template "{node} t7"` >> .hgtags
549 596 $ hg commit -m "manually add conflicting t7 tag"
597 hook: tag changes detected
550 598 $ hg merge --tool internal:tagmerge
551 599 merging .hgtags
552 600 automatic .hgtags merge failed
553 601 the following 1 tags are in conflict: t7
554 602 automatic tag merging of .hgtags failed! (use 'hg resolve --tool :merge' or another merge tool of your choice)
555 603 2 files updated, 0 files merged, 0 files removed, 1 files unresolved
556 604 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
557 605 [1]
558 606 $ hg resolve -l
559 607 U .hgtags
560 608 $ cat .hgtags
561 609 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
562 610 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
563 611 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
564 612 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
565 613 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
566 614 0000000000000000000000000000000000000000 t2
567 615 875517b4806a848f942811a315a5bce30804ae85 t5
568 616 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
569 617 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
570 618 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
571 619
572 620 $ cd ..
573 621
574 622 handle the loss of tags
575 623
576 624 $ hg clone repo-automatic-tag-merge-clone repo-merge-lost-tags
577 625 updating to branch default
578 626 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
579 627 $ cd repo-merge-lost-tags
580 628 $ echo c5 > f5
581 629 $ hg ci -A -m5
582 630 adding f5
583 631 $ hg tag -f t7
632 hook: tag changes detected
584 633 $ hg update -r 'p1(t7)'
585 634 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
586 635 $ printf '' > .hgtags
587 636 $ hg commit -m 'delete all tags'
588 637 created new head
638 hook: tag changes detected
589 639 $ hg log -r 'max(t7::)'
590 640 changeset: 17:ffe462b50880
591 641 user: test
592 642 date: Thu Jan 01 00:00:00 1970 +0000
593 643 summary: Added tag t7 for changeset fd3a9e394ce3
594 644
595 645 $ hg update -r 'max(t7::)'
596 646 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
597 647 $ hg merge -r tip --tool internal:tagmerge
598 648 merging .hgtags
599 649 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
600 650 (branch merge, don't forget to commit)
601 651 $ hg resolve -l
602 652 R .hgtags
603 653 $ cat .hgtags
604 654 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
605 655 0000000000000000000000000000000000000000 tbase
606 656 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
607 657 0000000000000000000000000000000000000000 t1
608 658 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
609 659 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
610 660 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
611 661 0000000000000000000000000000000000000000 t2
612 662 875517b4806a848f942811a315a5bce30804ae85 t5
613 663 0000000000000000000000000000000000000000 t5
614 664 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
615 665 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
616 666 0000000000000000000000000000000000000000 t3
617 667 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
618 668 0000000000000000000000000000000000000000 t7
619 669 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
620 670 fd3a9e394ce3afb354a496323bf68ac1755a30de t7
621 671
622 672 also check that we minimize the diff with the 1st merge parent
623 673
624 674 $ hg diff --git -r 'p1()' .hgtags
625 675 diff --git a/.hgtags b/.hgtags
626 676 --- a/.hgtags
627 677 +++ b/.hgtags
628 678 @@ -1,12 +1,17 @@
629 679 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
630 680 +0000000000000000000000000000000000000000 tbase
631 681 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
632 682 +0000000000000000000000000000000000000000 t1
633 683 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
634 684 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
635 685 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
636 686 0000000000000000000000000000000000000000 t2
637 687 875517b4806a848f942811a315a5bce30804ae85 t5
638 688 +0000000000000000000000000000000000000000 t5
639 689 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
640 690 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
641 691 +0000000000000000000000000000000000000000 t3
642 692 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
643 693 +0000000000000000000000000000000000000000 t7
644 694 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
645 695 fd3a9e394ce3afb354a496323bf68ac1755a30de t7
646 696
General Comments 0
You need to be logged in to leave comments. Login now