##// END OF EJS Templates
discovery: properly filter changeset in 'peer.known' (issue4982)...
Pierre-Yves David -
r27319:b64b6fdc default
parent child Browse files
Show More
@@ -1,1921 +1,1922 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
from node import hex, nullid, wdirrev, short
from i18n import _
import urllib
import functools
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset, cmdutil
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect, random
import branchmap, pathutil
import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    # Every descriptor operation is redirected to the unfiltered repo so
    # that a cached value is shared by all filtered views of the repository.
    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # resolve fname relative to the store (.hg/store) instead of .hg/
        return obj.sjoin(fname)
40 40
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # accessed on the unfiltered repo itself: compute and cache there
            return super(unfilteredpropertycache, self).__get__(unfi)
        # accessed through a filtered view: read the value cached on (or
        # computed by) the unfiltered repo instead
        return getattr(unfi, self.name)
49 49
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # cache on the view itself (not the unfiltered repo), since the
        # value depends on which revisions are filtered out
        object.__setattr__(obj, self.name, value)
55 55
56 56
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
60 60
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    # functools.wraps keeps orig's __name__/__doc__ on the wrapper so
    # introspection, profiles and error messages point at the real method
    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
66 66
# wire-protocol capabilities advertised by a localpeer
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
# legacy peers additionally support the pre-getbundle 'changegroupsubset'
legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # expose the 'served' view, not the raw repo, so revisions this
        # repository does not serve stay invisible to the peer
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # return the underlying repo: marks this peer as local
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            # translate the local race into the error the wire protocol
            # reports for the same situation
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
173 173
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 192
class localrepository(object):

    # repository format features that affect how data is stored on disk
    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    # all requirements this class knows how to open
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # requirements that translate into store-opener options
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # name of the repoview filter in effect; None on the unfiltered repo
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
205 205
    def _baserequirements(self, create):
        # requirements every newly-created repository starts with
        return ['revlogv1']
208 208
    def __init__(self, baseui, path=None, create=False):
        """Open the repository at 'path', or initialize it when create=True.

        Raises error.RepoError when the repository is missing (create=False)
        or already exists (create=True).
        """
        self.requirements = set()
        # working directory vfs, rooted at the (resolved) repository root
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        # .hg/ vfs
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # missing .hg/hgrc is fine; run with the base configuration
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only run setup functions from extensions that are enabled
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                self.requirements.update(self._baserequirements(create))
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    self.requirements.add("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        self.requirements.add("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            self.requirements.add('dotencode')
                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if scmutil.gdinitconfig(self.ui):
                    self.requirements.add("generaldelta")
                if self.ui.configbool('experimental', 'treemanifest', False):
                    self.requirements.add("treemanifest")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    self.requirements.add("manifestv2")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # opening an existing repo: read .hg/requires, rejecting
            # requirements we do not support
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        # honor .hg/sharedpath if present (share extension)
        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
329 329
    def close(self):
        # flush write-delayed caches before the repo object goes away
        self._writecaches()
332 332
    def _writecaches(self):
        # the rev branch cache is the only write-delayed cache at the moment
        if self._revbranchcache:
            self._revbranchcache.write()
336 336
    def _restrictcapabilities(self, caps):
        """amend the capability set advertised for this repository"""
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            # copy before mutating: the caller may pass a shared set
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urllib.quote(capsblob))
        return caps
343 343
    def _applyopenerreqs(self):
        """translate repo requirements and config into store-opener options"""
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
364 364
    def _writerequirements(self):
        # persist self.requirements into .hg/requires
        scmutil.writerequires(self.vfs, self.requirements)
367 367
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # path is a declared subrepo itself: legal
                    return True
                else:
                    # path lies inside a subrepo: delegate the decision to it
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # walk up one directory level and retry
                parts.pop()
        return False
405 405
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle
408 408
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        # the base class is already unfiltered, hence returns itself
        return self
414 414
    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
422 422
    @repofilecache('bookmarks')
    def _bookmarks(self):
        # bookmark store; invalidated whenever .hg/bookmarks changes
        return bookmarks.bmstore(self)
426 426
    @repofilecache('bookmarks.current')
    def _activebookmark(self):
        # currently active bookmark; invalidated when .hg/bookmarks.current
        # changes
        return bookmarks.readactive(self)
430 430
431 431 def bookmarkheads(self, bookmark):
432 432 name = bookmark.split('@', 1)[0]
433 433 heads = []
434 434 for mark, n in self._bookmarks.iteritems():
435 435 if mark.split('@', 1)[0] == name:
436 436 heads.append(n)
437 437 return heads
438 438
    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)
445 445
    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        # read-only unless the createmarkers feature is enabled
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store
463 463
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            # a transaction is in progress in the repo named by HG_PENDING
            # (e.g. during hooks): also read the pending changelog data
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
472 472
    @storecache('00manifest.i')
    def manifest(self):
        # manifest revlog; invalidated when 00manifest.i changes
        return manifest.manifest(self.svfs)
476 476
    def dirlog(self, dir):
        """return the per-directory manifest log for 'dir'"""
        return self.manifest.dirlog(dir)
479 479
    @repofilecache('dirstate')
    def dirstate(self):
        # working directory state; invalidated when .hg/dirstate changes
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)
484 484
    def _dirstatevalidate(self, node):
        """return node if it is a known changeset, else warn (once) and
        fall back to nullid"""
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                # warn at most once per repo instance
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid
495 495
    def __getitem__(self, changeid):
        """return the context for 'changeid' (None/wdirrev means the
        working directory; a slice yields a list of changectx)"""
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # slicing skips revisions hidden by the current filter
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)
504 504
505 505 def __contains__(self, changeid):
506 506 try:
507 507 self[changeid]
508 508 return True
509 509 except error.RepoLookupError:
510 510 return False
511 511
    def __nonzero__(self):
        # a repo object is always truthy, even when it has no changesets
        return True
514 514
    def __len__(self):
        """number of revisions in the changelog"""
        return len(self.changelog)
517 517
    def __iter__(self):
        """iterate over revision numbers"""
        return iter(self.changelog)
520 520
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revset.formatspec``.

        Return a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
533 533
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
542 542
    def url(self):
        """return the file: URL of this repository"""
        return 'file:' + self.root
545 545
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
554 554
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        """write tag(s) 'names' for 'node'; see tag() for the public API

        Runs the 'pretag' hook for each name first, then the 'tag' hook
        after writing. Returns the node of the tagging commit, or None for
        local tags.
        """
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append entries at the end of the tags file, keeping any
            # previous content intact
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    # an existing tag is being moved: record the old node
                    # first so history of the tag is preserved
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
628 628
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to commit a tag on top of uncommitted .hgtags changes
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
658 658
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived structures, computed lazily by tagslist()/nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
681 681
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # some revisions are filtered out: recompute instead of using
            # the cache, which was built without filtering in mind
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
697 697
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
730 730
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
741 741
742 742 def tagslist(self):
743 743 '''return a list of tags ordered by revision'''
744 744 if not self._tagscache.tagslist:
745 745 l = []
746 746 for t, n in self.tags().iteritems():
747 747 l.append((self.changelog.rev(n), t, n))
748 748 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
749 749
750 750 return self._tagscache.tagslist
751 751
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # lazily build the node -> sorted([tag, ...]) reverse mapping
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
762 762
763 763 def nodebookmarks(self, node):
764 764 """return the list of bookmarks pointing to the specified node"""
765 765 marks = []
766 766 for bookmark, n in self._bookmarks.iteritems():
767 767 if n == node:
768 768 marks.append(bookmark)
769 769 return sorted(marks)
770 770
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # make sure the cache for the current filter level is up to date
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
776 776
    @unfilteredmethod
    def revbranchcache(self):
        # lazily create the rev -> branch cache on the unfiltered repo
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
782 782
783 783 def branchtip(self, branch, ignoremissing=False):
784 784 '''return the tip node for a given branch
785 785
786 786 If ignoremissing is True, then this method will not raise an error.
787 787 This is helpful for callers that only expect None for a missing branch
788 788 (e.g. namespace).
789 789
790 790 '''
791 791 try:
792 792 return self.branchmap().branchtip(branch)
793 793 except KeyError:
794 794 if not ignoremissing:
795 795 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
796 796 else:
797 797 pass
798 798
    def lookup(self, key):
        """resolve a changeid to its binary node"""
        return self[key].node()
801 801
802 802 def lookupbranch(self, key, remote=None):
803 803 repo = remote or self
804 804 if key in repo.branchmap():
805 805 return key
806 806
807 807 repo = (remote and remote.local()) and remote or self
808 808 return repo[key].branch()
809 809
810 810 def known(self, nodes):
811 nm = self.changelog.nodemap
812 pc = self._phasecache
811 cl = self.changelog
812 nm = cl.nodemap
813 filtered = cl.filteredrevs
813 814 result = []
814 815 for n in nodes:
815 816 r = nm.get(n)
816 resp = not (r is None or pc.phase(self, r) >= phases.secret)
817 resp = not (r is None or r in filtered)
817 818 result.append(resp)
818 819 return result
819 820
    def local(self):
        # a localrepository is, by definition, local
        return self
822 823
    def publishing(self):
        """True when this repository publishes changesets on push/pull"""
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)
827 828
    def cancopy(self):
        """True when this repository can be cloned by direct file copy"""
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs
836 837
    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None
842 843
    def join(self, f, *insidef):
        """join path components below the .hg directory"""
        return self.vfs.join(os.path.join(f, *insidef))
845 846
    def wjoin(self, f, *insidef):
        """join path components below the working directory root"""
        return self.vfs.reljoin(self.root, f, *insidef)
848 849
    def file(self, f):
        """return the filelog for tracked file 'f'"""
        if f[0] == '/':
            # normalize away a leading slash
            f = f[1:]
        return filelog.filelog(self.svfs, f)
853 854
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        # deprecated: warn callers towards repo[changeid].parents()
        msg = 'repo.parents() is deprecated, use repo[%r].parents()' % changeid
        self.ui.deprecwarn(msg, '3.7')
        return self[changeid].parents()
859 860
    def changectx(self, changeid):
        """return the context for changeid; same as self[changeid]"""
        return self[changeid]
862 863
    def setparents(self, p1, p2=nullid):
        """set the working directory parents, adjusting copy records"""
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # leaving a merge: drop copy records whose source and dest
            # are both unrelated to the single remaining parent
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
879 880
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
884 885
    def getcwd(self):
        """Return the current working directory as tracked by the dirstate."""
        return self.dirstate.getcwd()
887 888
    def pathto(self, f, cwd=None):
        """Return repo-file ``f`` as a path relative to ``cwd`` (dirstate
        delegation)."""
        return self.dirstate.pathto(f, cwd)
890 891
    def wfile(self, f, mode='r'):
        """Open working-directory file ``f`` with ``mode`` via the working vfs."""
        return self.wvfs(f, mode)
893 894
    def _link(self, f):
        """Return True if working-directory file ``f`` is a symlink."""
        return self.wvfs.islink(f)
896 897
    def _loadfilter(self, filter):
        """Build (and cache) the list of (matcher, filterfn, params) triples
        configured in the ``[encode]``/``[decode]`` section named ``filter``."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # a command starting with a registered data-filter name uses
                # that in-process filter instead of a shell pipe
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
920 921
921 922 def _filter(self, filterpats, filename, data):
922 923 for mf, fn, cmd in filterpats:
923 924 if mf(filename):
924 925 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
925 926 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
926 927 break
927 928
928 929 return data
929 930
    @unfilteredpropertycache
    def _encodefilterpats(self):
        """Cached [encode] filter patterns (working dir -> store direction)."""
        return self._loadfilter('encode')
933 934
    @unfilteredpropertycache
    def _decodefilterpats(self):
        """Cached [decode] filter patterns (store -> working dir direction)."""
        return self._loadfilter('decode')
937 938
    def adddatafilter(self, name, filter):
        """Register an in-process data filter callable under ``name`` for use
        by encode/decode filter commands."""
        self._datafilters[name] = filter
940 941
941 942 def wread(self, filename):
942 943 if self._link(filename):
943 944 data = self.wvfs.readlink(filename)
944 945 else:
945 946 data = self.wvfs.read(filename)
946 947 return self._filter(self._encodefilterpats, filename, data)
947 948
948 949 def wwrite(self, filename, data, flags):
949 950 """write ``data`` into ``filename`` in the working directory
950 951
951 952 This returns length of written (maybe decoded) data.
952 953 """
953 954 data = self._filter(self._decodefilterpats, filename, data)
954 955 if 'l' in flags:
955 956 self.wvfs.symlink(data, filename)
956 957 else:
957 958 self.wvfs.write(filename, data)
958 959 if 'x' in flags:
959 960 self.wvfs.setflags(filename, False, True)
960 961 return len(data)
961 962
    def wwritedata(self, filename, data):
        """Return ``data`` passed through the decode filters for ``filename``
        without writing anything."""
        return self._filter(self._decodefilterpats, filename, data)
964 965
965 966 def currenttransaction(self):
966 967 """return the current transaction or None if non exists"""
967 968 if self._transref:
968 969 tr = self._transref()
969 970 else:
970 971 tr = None
971 972
972 973 if tr and tr.running():
973 974 return tr
974 975 return None
975 976
    def transaction(self, desc, report=None):
        """Open (or nest into) a store transaction described by ``desc``.

        ``report`` is an optional callable used for warnings instead of
        ``ui.warn``. Returns the transaction object.
        """
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            # a transaction is already running; join it
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # make journal.dirstate contain in-memory changes at this point
        self.dirstate.write(None)

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # prevent in-memory changes from being written out at
                # the end of outer wlock scope or so
                repo.dirstate.invalidate()

                # discard all changes (including ones already written
                # out) in this transaction
                repo.vfs.rename('journal.dirstate', 'dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
1064 1065
    def _journalfiles(self):
        """Return (vfs, name) pairs for every file backing the transaction
        journal."""
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))
1072 1073
    def undofiles(self):
        """Return (vfs, name) pairs for the undo counterparts of the journal
        files."""
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1075 1076
    def _writejournal(self, desc):
        """Snapshot dirstate, branch, description, bookmarks and phaseroots
        into journal.* files so an interrupted transaction can be undone."""
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
1087 1088
1088 1089 def recover(self):
1089 1090 lock = self.lock()
1090 1091 try:
1091 1092 if self.svfs.exists("journal"):
1092 1093 self.ui.status(_("rolling back interrupted transaction\n"))
1093 1094 vfsmap = {'': self.svfs,
1094 1095 'plain': self.vfs,}
1095 1096 transaction.rollback(self.svfs, vfsmap, "journal",
1096 1097 self.ui.warn)
1097 1098 self.invalidate()
1098 1099 return True
1099 1100 else:
1100 1101 self.ui.warn(_("no interrupted transaction available\n"))
1101 1102 return False
1102 1103 finally:
1103 1104 lock.release()
1104 1105
    def rollback(self, dryrun=False, force=False):
        """Undo the most recent transaction recorded in the undo files.

        Returns 0 on success (delegated to _rollback) or 1 when there is
        nothing to roll back.
        """
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                # guard the dirstate so a failed rollback restores it
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
1119 1120
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Perform the actual rollback of the last transaction.

        ``dryrun`` only reports what would happen; ``force`` skips the
        data-loss safety check; ``dsguard`` protects the dirstate file.
        Returns 0.
        """
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # undo.desc missing or unreadable: older journal format
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # did the rollback remove the working directory parents?
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1191 1192
1192 1193 def invalidatecaches(self):
1193 1194
1194 1195 if '_tagscache' in vars(self):
1195 1196 # can't use delattr on proxy
1196 1197 del self.__dict__['_tagscache']
1197 1198
1198 1199 self.unfiltered()._branchcaches.clear()
1199 1200 self.invalidatevolatilesets()
1200 1201
    def invalidatevolatilesets(self):
        """Clear caches derived from volatile state (filtered revs, obsolete
        markers)."""
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1204 1205
1205 1206 def invalidatedirstate(self):
1206 1207 '''Invalidates the dirstate, causing the next call to dirstate
1207 1208 to check if it was modified since the last time it was read,
1208 1209 rereading it if it has.
1209 1210
1210 1211 This is different to dirstate.invalidate() that it doesn't always
1211 1212 rereads the dirstate. Use dirstate.invalidate() if you want to
1212 1213 explicitly read the dirstate again (i.e. restoring it to a previous
1213 1214 known good state).'''
1214 1215 if hasunfilteredcache(self, 'dirstate'):
1215 1216 for k in self.dirstate._filecache:
1216 1217 try:
1217 1218 delattr(self.dirstate, k)
1218 1219 except AttributeError:
1219 1220 pass
1220 1221 delattr(self.unfiltered(), 'dirstate')
1221 1222
1222 1223 def invalidate(self, clearfilecache=False):
1223 1224 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1224 1225 for k in self._filecache.keys():
1225 1226 # dirstate is invalidated separately in invalidatedirstate()
1226 1227 if k == 'dirstate':
1227 1228 continue
1228 1229
1229 1230 if clearfilecache:
1230 1231 del self._filecache[k]
1231 1232 try:
1232 1233 delattr(unfiltered, k)
1233 1234 except AttributeError:
1234 1235 pass
1235 1236 self.invalidatecaches()
1236 1237 self.store.invalidatecaches()
1237 1238
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()
1244 1245
1245 1246 def _refreshfilecachestats(self, tr):
1246 1247 """Reload stats of cached files so that they are flagged as valid"""
1247 1248 for k, ce in self._filecache.items():
1248 1249 if k == 'dirstate' or k not in self.__dict__:
1249 1250 continue
1250 1251 ce.refresh()
1251 1252
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        """Acquire ``lockname`` on ``vfs``; shared helper behind lock()/wlock().

        Tries a non-blocking acquisition first; if held and ``wait`` is true,
        retries with a configurable timeout (ui.timeout, default 600s).
        """
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
1276 1277
1277 1278 def _afterlock(self, callback):
1278 1279 """add a callback to be run when the repository is fully unlocked
1279 1280
1280 1281 The callback will be executed when the outermost lock is released
1281 1282 (with wlock being higher level than 'lock')."""
1282 1283 for ref in (self._wlockref, self._lockref):
1283 1284 l = ref and ref()
1284 1285 if l and l.held:
1285 1286 l.postrelease.append(callback)
1286 1287 break
1287 1288 else: # no lock have been found.
1288 1289 callback()
1289 1290
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # already held: bump the recursion count and reuse it
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1306 1307
    def _wlockchecktransaction(self):
        """Inheritance checker for wlock: forbid handing the lock to a child
        process while a transaction is running."""
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')
1311 1312
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # already held: bump the recursion count and reuse it
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # flush (or discard) pending dirstate changes on release
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
1348 1349
1349 1350 def _currentlock(self, lockref):
1350 1351 """Returns the lock if it's held, or None if it's not."""
1351 1352 if lockref is None:
1352 1353 return None
1353 1354 l = lockref()
1354 1355 if l is None or not l.held:
1355 1356 return None
1356 1357 return l
1357 1358
    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)
1361 1362
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the filelog node for the committed file revision; appends
        the filename to ``changelist`` when a new revision was created.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # the file context already points at a stored revision; reuse it
            # when it matches one of the parents
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1447 1448
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there was nothing to
        commit.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
1631 1632
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Returns the node of the new changeset.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1717 1718
1718 1719 @unfilteredmethod
1719 1720 def destroying(self):
1720 1721 '''Inform the repository that nodes are about to be destroyed.
1721 1722 Intended for use by strip and rollback, so there's a common
1722 1723 place for anything that has to be done before destroying history.
1723 1724
1724 1725 This is mostly useful for saving state that is in memory and waiting
1725 1726 to be flushed when the current lock is released. Because a call to
1726 1727 destroyed is imminent, the repo will be invalidated causing those
1727 1728 changes to stay in memory (waiting for the next unlock), or vanish
1728 1729 completely.
1729 1730 '''
1730 1731 # When using the same lock to commit and strip, the phasecache is left
1731 1732 # dirty after committing. Then when we strip, the repo is invalidated,
1732 1733 # causing those changes to disappear.
1733 1734 if '_phasecache' in vars(self):
1734 1735 self._phasecache.write()
1735 1736
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        NOTE(review): like destroying(), this appears to expect the repo
        lock to be held by the caller -- confirm against strip/rollback.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1769 1770
1770 1771 def walk(self, match, node=None):
1771 1772 '''
1772 1773 walk recursively through the directory tree or a given
1773 1774 changeset, finding all files matched by the match
1774 1775 function
1775 1776 '''
1776 1777 return self[node].walk(match)
1777 1778
1778 1779 def status(self, node1='.', node2=None, match=None,
1779 1780 ignored=False, clean=False, unknown=False,
1780 1781 listsubrepos=False):
1781 1782 '''a convenience method that calls node1.status(node2)'''
1782 1783 return self[node1].status(node2, match, ignored, clean, unknown,
1783 1784 listsubrepos)
1784 1785
1785 1786 def heads(self, start=None):
1786 1787 heads = self.changelog.heads(start)
1787 1788 # sort the output in rev descending order
1788 1789 return sorted(heads, key=self.changelog.rev, reverse=True)
1789 1790
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            # default to the branch of the working directory parent
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            # unknown branch name: no heads at all
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
1810 1811
1811 1812 def branches(self, nodes):
1812 1813 if not nodes:
1813 1814 nodes = [self.changelog.tip()]
1814 1815 b = []
1815 1816 for n in nodes:
1816 1817 t = n
1817 1818 while True:
1818 1819 p = self.changelog.parents(n)
1819 1820 if p[1] != nullid or p[0] == nullid:
1820 1821 b.append((t, n, p[0], p[1]))
1821 1822 break
1822 1823 n = p[0]
1823 1824 return b
1824 1825
1825 1826 def between(self, pairs):
1826 1827 r = []
1827 1828
1828 1829 for top, bottom in pairs:
1829 1830 n, l, i = top, [], 0
1830 1831 f = 1
1831 1832
1832 1833 while n != bottom and n != nullid:
1833 1834 p = self.changelog.parents(n)[0]
1834 1835 if i == f:
1835 1836 l.append(n)
1836 1837 f = f * 2
1837 1838 n = p
1838 1839 i += 1
1839 1840
1840 1841 r.append(l)
1841 1842
1842 1843 return r
1843 1844
1844 1845 def checkpush(self, pushop):
1845 1846 """Extensions can override this function if additional checks have
1846 1847 to be performed before pushing, or call it if they override push
1847 1848 command.
1848 1849 """
1849 1850 pass
1850 1851
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks container of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()
1857 1858
    def pushkey(self, namespace, key, old, new):
        """Set *key* to *new* (expecting current value *old*) in *namespace*.

        The 'prepushkey' hook is run first and may veto the operation, in
        which case False is returned.  Otherwise the result of
        pushkey.push is returned, and the 'pushkey' hook is scheduled to
        run via _afterlock.
        """
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                # merge in the hook arguments of the active transaction
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            # a prepushkey hook refused the update: report and fail softly
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
1881 1882
1882 1883 def listkeys(self, namespace):
1883 1884 self.hook('prelistkeys', throw=True, namespace=namespace)
1884 1885 self.ui.debug('listing keys for "%s"\n' % namespace)
1885 1886 values = pushkey.list(self, namespace)
1886 1887 self.hook('listkeys', namespace=namespace, values=values)
1887 1888 return values
1888 1889
1889 1890 def debugwireargs(self, one, two, three=None, four=None, five=None):
1890 1891 '''used to test argument passing over the wire'''
1891 1892 return "%s %s %s %s %s" % (one, two, three, four, five)
1892 1893
1893 1894 def savecommitmessage(self, text):
1894 1895 fp = self.vfs('last-message.txt', 'wb')
1895 1896 try:
1896 1897 fp.write(text)
1897 1898 finally:
1898 1899 fp.close()
1899 1900 return self.pathto(fp.name[len(self.root) + 1:])
1900 1901
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback performing the queued (vfs, src, dest) renames.

    Renames whose source does not exist yet (OSError) are silently
    skipped.
    """
    renamefiles = [tuple(entry) for entry in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError:
                # journal file does not yet exist
                pass
    return a
1911 1912
def undoname(fn):
    """Map a journal file path to the corresponding undo file path.

    The basename must start with 'journal'; that prefix is replaced by
    'undo'.
    """
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, 'undo' + name[len('journal'):])
1916 1917
def instance(ui, path, create):
    """Module entry point: construct a localrepository for *path*.

    util.urllocalpath presumably normalizes a local URL to a plain
    filesystem path -- confirm against util; *create* is forwarded to the
    repository constructor.
    """
    return localrepository(ui, util.urllocalpath(path), create)
1919 1920
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,158 +1,154 b''
1 1 Test changesets filtering during exchanges (some tests are still in
2 2 test-obsolete.t)
3 3
4 4 $ cat >> $HGRCPATH << EOF
5 5 > [experimental]
6 6 > evolution=createmarkers
7 7 > EOF
8 8
9 9 Push does not corrupt remote
10 10 ----------------------------
11 11
12 12 Create a DAG where a changeset reuses a revision from a file first used in an
13 13 extinct changeset.
14 14
15 15 $ hg init local
16 16 $ cd local
17 17 $ echo 'base' > base
18 18 $ hg commit -Am base
19 19 adding base
20 20 $ echo 'A' > A
21 21 $ hg commit -Am A
22 22 adding A
23 23 $ hg up 0
24 24 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
25 25 $ hg revert -ar 1
26 26 adding A
27 27 $ hg commit -Am "A'"
28 28 created new head
29 29 $ hg log -G --template='{desc} {node}'
30 30 @ A' f89bcc95eba5174b1ccc3e33a82e84c96e8338ee
31 31 |
32 32 | o A 9d73aac1b2ed7d53835eaeec212ed41ea47da53a
33 33 |/
34 34 o base d20a80d4def38df63a4b330b7fb688f3d4cae1e3
35 35
36 36 $ hg debugobsolete 9d73aac1b2ed7d53835eaeec212ed41ea47da53a f89bcc95eba5174b1ccc3e33a82e84c96e8338ee
37 37
38 38 Push it. The bundle should not refer to the extinct changeset.
39 39
40 40 $ hg init ../other
41 41 $ hg push ../other
42 42 pushing to ../other
43 43 searching for changes
44 44 adding changesets
45 45 adding manifests
46 46 adding file changes
47 47 added 2 changesets with 2 changes to 2 files
48 48 $ hg -R ../other verify
49 49 checking changesets
50 50 checking manifests
51 51 crosschecking files in changesets and manifests
52 52 checking files
53 53 2 files, 2 changesets, 2 total revisions
54 54
55 55 Adding a changeset going extinct locally
56 56 ------------------------------------------
57 57
 58 58 Pull a changeset that will immediately go extinct (because you already have a
 59 59 marker obsoleting it)
60 60 (test resolution of issue3788)
61 61
62 62 $ hg phase --draft --force f89bcc95eba5
63 63 $ hg phase -R ../other --draft --force f89bcc95eba5
64 64 $ hg commit --amend -m "A''"
65 65 $ hg --hidden --config extensions.mq= strip --no-backup f89bcc95eba5
66 66 $ hg pull ../other
67 67 pulling from ../other
68 68 searching for changes
69 69 adding changesets
70 70 adding manifests
71 71 adding file changes
72 72 added 1 changesets with 0 changes to 1 files (+1 heads)
73 73 (run 'hg heads' to see heads, 'hg merge' to merge)
74 74
75 75 check that bundle is not affected
76 76
77 77 $ hg bundle --hidden --rev f89bcc95eba5 --base "f89bcc95eba5^" ../f89bcc95eba5.hg
78 78 1 changesets found
79 79 $ hg --hidden --config extensions.mq= strip --no-backup f89bcc95eba5
80 80 $ hg unbundle ../f89bcc95eba5.hg
81 81 adding changesets
82 82 adding manifests
83 83 adding file changes
84 84 added 1 changesets with 0 changes to 1 files (+1 heads)
85 85 (run 'hg heads' to see heads)
86 86 $ cd ..
87 87
88 88 pull does not fetch excessive changesets when common node is hidden (issue4982)
89 89 -------------------------------------------------------------------------------
90 90
91 91 initial repo with server and client matching
92 92
93 93 $ hg init pull-hidden-common
94 94 $ cd pull-hidden-common
95 95 $ touch foo
96 96 $ hg -q commit -A -m initial
97 97 $ echo 1 > foo
98 98 $ hg commit -m 1
99 99 $ echo 2a > foo
100 100 $ hg commit -m 2a
101 101 $ cd ..
102 102 $ hg clone --pull pull-hidden-common pull-hidden-common-client
103 103 requesting all changes
104 104 adding changesets
105 105 adding manifests
106 106 adding file changes
107 107 added 3 changesets with 3 changes to 1 files
108 108 updating to branch default
109 109 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
110 110
111 111 server obsoletes the old head
112 112
113 113 $ cd pull-hidden-common
114 114 $ hg -q up -r 1
115 115 $ echo 2b > foo
116 116 $ hg -q commit -m 2b
117 117 $ hg debugobsolete 6a29ed9c68defff1a139e5c6fa9696fb1a75783d bec0734cd68e84477ba7fc1d13e6cff53ab70129
118 118 $ cd ..
119 119
120 120 client only pulls down 1 changeset
121 ("all local heads known remotely" may change if the wire protocol discovery
122 commands ever stop saying they have hidden changesets)
123 121
124 122 $ cd pull-hidden-common-client
125 123 $ hg pull --debug
126 124 pulling from $TESTTMP/pull-hidden-common (glob)
127 125 query 1; heads
128 126 searching for changes
129 all local heads known remotely
130 3 changesets found
127 taking quick initial sample
128 query 2; still undecided: 2, sample size is: 2
129 2 total queries
130 1 changesets found
131 131 list of changesets:
132 96ee1d7354c4ad7372047672c36a1f561e3a6a4c
133 a33779fdfc23063680fc31e9ff637dff6876d3d2
134 132 bec0734cd68e84477ba7fc1d13e6cff53ab70129
135 133 listing keys for "phase"
136 134 listing keys for "bookmarks"
137 135 bundle2-output-bundle: "HG20", 3 parts total
138 136 bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
139 137 bundle2-output-part: "listkeys" (params: 1 mandatory) empty payload
140 138 bundle2-output-part: "listkeys" (params: 1 mandatory) empty payload
141 139 bundle2-input-bundle: with-transaction
142 140 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
143 141 adding changesets
144 add changeset 96ee1d7354c4
145 add changeset a33779fdfc23
146 142 add changeset bec0734cd68e
147 143 adding manifests
148 144 adding file changes
149 145 adding foo revisions
150 146 added 1 changesets with 1 changes to 1 files (+1 heads)
151 bundle2-input-part: total payload size 1378
147 bundle2-input-part: total payload size 474
152 148 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
153 149 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
154 150 bundle2-input-bundle: 2 parts total
155 151 checking for updated bookmarks
156 152 listing keys for "phases"
157 153 updating the branch cache
158 154 (run 'hg heads' to see heads, 'hg merge' to merge)
@@ -1,977 +1,975 b''
1 1 $ cat >> $HGRCPATH << EOF
2 2 > [phases]
3 3 > # public changeset are not obsolete
4 4 > publish=false
5 5 > [ui]
6 6 > logtemplate="{rev}:{node|short} ({phase}) [{tags} {bookmarks}] {desc|firstline}\n"
7 7 > [experimental]
8 8 > # drop me once bundle2 is the default,
9 9 > # added to get test change early.
10 10 > bundle2-exp = True
11 11 > EOF
12 12 $ mkcommit() {
13 13 > echo "$1" > "$1"
14 14 > hg add "$1"
15 15 > hg ci -m "add $1"
16 16 > }
17 17 $ getid() {
18 18 > hg log -T "{node}\n" --hidden -r "desc('$1')"
19 19 > }
20 20
21 21 $ cat > debugkeys.py <<EOF
22 22 > def reposetup(ui, repo):
23 23 > class debugkeysrepo(repo.__class__):
24 24 > def listkeys(self, namespace):
25 25 > ui.write('listkeys %s\n' % (namespace,))
26 26 > return super(debugkeysrepo, self).listkeys(namespace)
27 27 >
28 28 > if repo.local():
29 29 > repo.__class__ = debugkeysrepo
30 30 > EOF
31 31
32 32 $ hg init tmpa
33 33 $ cd tmpa
34 34 $ mkcommit kill_me
35 35
36 36 Checking that the feature is properly disabled
37 37
38 38 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
39 39 abort: creating obsolete markers is not enabled on this repo
40 40 [255]
41 41
42 42 Enabling it
43 43
44 44 $ cat >> $HGRCPATH << EOF
45 45 > [experimental]
46 46 > evolution=createmarkers,exchange
47 47 > EOF
48 48
49 49 Killing a single changeset without replacement
50 50
51 51 $ hg debugobsolete 0
52 52 abort: changeset references must be full hexadecimal node identifiers
53 53 [255]
54 54 $ hg debugobsolete '00'
55 55 abort: changeset references must be full hexadecimal node identifiers
56 56 [255]
57 57 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
58 58 $ hg debugobsolete
59 59 97b7c2d76b1845ed3eb988cd612611e72406cef0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'babar'}
60 60
61 61 (test that mercurial is not confused)
62 62
63 63 $ hg up null --quiet # having 0 as parent prevents it to be hidden
64 64 $ hg tip
65 65 -1:000000000000 (public) [tip ]
66 66 $ hg up --hidden tip --quiet
67 67
68 68 Killing a single changeset with itself should fail
69 69 (simple local safeguard)
70 70
71 71 $ hg debugobsolete `getid kill_me` `getid kill_me`
72 72 abort: bad obsmarker input: in-marker cycle with 97b7c2d76b1845ed3eb988cd612611e72406cef0
73 73 [255]
74 74
75 75 $ cd ..
76 76
77 77 Killing a single changeset with replacement
78 78 (and testing the format option)
79 79
80 80 $ hg init tmpb
81 81 $ cd tmpb
82 82 $ mkcommit a
83 83 $ mkcommit b
84 84 $ mkcommit original_c
85 85 $ hg up "desc('b')"
86 86 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
87 87 $ mkcommit new_c
88 88 created new head
89 89 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
90 90 $ hg debugobsolete --config format.obsstore-version=0 --flag 12 `getid original_c` `getid new_c` -d '121 120'
91 91 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
92 92 2:245bde4270cd add original_c
93 93 $ hg debugrevlog -cd
94 94 # rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
95 95 0 -1 -1 0 59 0 0 0 0 58 58 0 1 0
96 96 1 0 -1 59 118 59 59 0 0 58 116 0 1 0
97 97 2 1 -1 118 193 118 118 59 0 76 192 0 1 0
98 98 3 1 -1 193 260 193 193 59 0 66 258 0 2 0
99 99 $ hg debugobsolete
100 100 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
101 101
102 102 (check for version number of the obsstore)
103 103
104 104 $ dd bs=1 count=1 if=.hg/store/obsstore 2>/dev/null
105 105 \x00 (no-eol) (esc)
106 106
107 107 do it again (it read the obsstore before adding new changeset)
108 108
109 109 $ hg up '.^'
110 110 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
111 111 $ mkcommit new_2_c
112 112 created new head
113 113 $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
114 114 $ hg debugobsolete
115 115 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
116 116 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
117 117
118 118 Register two markers with a missing node
119 119
120 120 $ hg up '.^'
121 121 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
122 122 $ mkcommit new_3_c
123 123 created new head
124 124 $ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
125 125 $ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
126 126 $ hg debugobsolete
127 127 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
128 128 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
129 129 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
130 130 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
131 131
132 132 Refuse pathological nullid successors
133 133 $ hg debugobsolete -d '9001 0' 1337133713371337133713371337133713371337 0000000000000000000000000000000000000000
134 134 transaction abort!
135 135 rollback completed
136 136 abort: bad obsolescence marker detected: invalid successors nullid
137 137 [255]
138 138
139 139 Check that graphlog detect that a changeset is obsolete:
140 140
141 141 $ hg log -G
142 142 @ 5:5601fb93a350 (draft) [tip ] add new_3_c
143 143 |
144 144 o 1:7c3bad9141dc (draft) [ ] add b
145 145 |
146 146 o 0:1f0dee641bb7 (draft) [ ] add a
147 147
148 148
149 149 check that heads does not report them
150 150
151 151 $ hg heads
152 152 5:5601fb93a350 (draft) [tip ] add new_3_c
153 153 $ hg heads --hidden
154 154 5:5601fb93a350 (draft) [tip ] add new_3_c
155 155 4:ca819180edb9 (draft) [ ] add new_2_c
156 156 3:cdbce2fbb163 (draft) [ ] add new_c
157 157 2:245bde4270cd (draft) [ ] add original_c
158 158
159 159
160 160 check that summary does not report them
161 161
162 162 $ hg init ../sink
163 163 $ echo '[paths]' >> .hg/hgrc
164 164 $ echo 'default=../sink' >> .hg/hgrc
165 165 $ hg summary --remote
166 166 parent: 5:5601fb93a350 tip
167 167 add new_3_c
168 168 branch: default
169 169 commit: (clean)
170 170 update: (current)
171 171 phases: 3 draft
172 172 remote: 3 outgoing
173 173
174 174 $ hg summary --remote --hidden
175 175 parent: 5:5601fb93a350 tip
176 176 add new_3_c
177 177 branch: default
178 178 commit: (clean)
179 179 update: 3 new changesets, 4 branch heads (merge)
180 180 phases: 6 draft
181 181 remote: 3 outgoing
182 182
183 183 check that various commands work well with filtering
184 184
185 185 $ hg tip
186 186 5:5601fb93a350 (draft) [tip ] add new_3_c
187 187 $ hg log -r 6
188 188 abort: unknown revision '6'!
189 189 [255]
190 190 $ hg log -r 4
191 191 abort: hidden revision '4'!
192 192 (use --hidden to access hidden revisions)
193 193 [255]
194 194 $ hg debugrevspec 'rev(6)'
195 195 $ hg debugrevspec 'rev(4)'
196 196 $ hg debugrevspec 'null'
197 197 -1
198 198
199 199 Check that public changeset are not accounted as obsolete:
200 200
201 201 $ hg --hidden phase --public 2
202 202 $ hg log -G
203 203 @ 5:5601fb93a350 (draft) [tip ] add new_3_c
204 204 |
205 205 | o 2:245bde4270cd (public) [ ] add original_c
206 206 |/
207 207 o 1:7c3bad9141dc (public) [ ] add b
208 208 |
209 209 o 0:1f0dee641bb7 (public) [ ] add a
210 210
211 211
212 212 And that bumped changeset are detected
213 213 --------------------------------------
214 214
 215 215 If we didn't filter obsolete changesets out, 3 and 4 would show up too. Also
216 216 note that the bumped changeset (5:5601fb93a350) is not a direct successor of
217 217 the public changeset
218 218
219 219 $ hg log --hidden -r 'bumped()'
220 220 5:5601fb93a350 (draft) [tip ] add new_3_c
221 221
222 222 And that we can't push bumped changeset
223 223
224 224 $ hg push ../tmpa -r 0 --force #(make repo related)
225 225 pushing to ../tmpa
226 226 searching for changes
227 227 warning: repository is unrelated
228 228 adding changesets
229 229 adding manifests
230 230 adding file changes
231 231 added 1 changesets with 1 changes to 1 files (+1 heads)
232 232 $ hg push ../tmpa
233 233 pushing to ../tmpa
234 234 searching for changes
235 235 abort: push includes bumped changeset: 5601fb93a350!
236 236 [255]
237 237
238 238 Fixing "bumped" situation
239 239 We need to create a clone of 5 and add a special marker with a flag
240 240
241 241 $ hg up '5^'
242 242 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
243 243 $ hg revert -ar 5
244 244 adding new_3_c
245 245 $ hg ci -m 'add n3w_3_c'
246 246 created new head
247 247 $ hg debugobsolete -d '1338 0' --flags 1 `getid new_3_c` `getid n3w_3_c`
248 248 $ hg log -r 'bumped()'
249 249 $ hg log -G
250 250 @ 6:6f9641995072 (draft) [tip ] add n3w_3_c
251 251 |
252 252 | o 2:245bde4270cd (public) [ ] add original_c
253 253 |/
254 254 o 1:7c3bad9141dc (public) [ ] add b
255 255 |
256 256 o 0:1f0dee641bb7 (public) [ ] add a
257 257
258 258
259 259 $ cd ..
260 260
261 261 Revision 0 is hidden
262 262 --------------------
263 263
264 264 $ hg init rev0hidden
265 265 $ cd rev0hidden
266 266
267 267 $ mkcommit kill0
268 268 $ hg up -q null
269 269 $ hg debugobsolete `getid kill0`
270 270 $ mkcommit a
271 271 $ mkcommit b
272 272
273 273 Should pick the first visible revision as "repo" node
274 274
275 275 $ hg archive ../archive-null
276 276 $ cat ../archive-null/.hg_archival.txt
277 277 repo: 1f0dee641bb7258c56bd60e93edfa2405381c41e
278 278 node: 7c3bad9141dcb46ff89abf5f61856facd56e476c
279 279 branch: default
280 280 latesttag: null
281 281 latesttagdistance: 2
282 282 changessincelatesttag: 2
283 283
284 284
285 285 $ cd ..
286 286
287 287 Exchange Test
288 288 ============================
289 289
290 290 Destination repo does not have any data
291 291 ---------------------------------------
292 292
293 293 Simple incoming test
294 294
295 295 $ hg init tmpc
296 296 $ cd tmpc
297 297 $ hg incoming ../tmpb
298 298 comparing with ../tmpb
299 299 0:1f0dee641bb7 (public) [ ] add a
300 300 1:7c3bad9141dc (public) [ ] add b
301 301 2:245bde4270cd (public) [ ] add original_c
302 302 6:6f9641995072 (draft) [tip ] add n3w_3_c
303 303
304 304 Try to pull markers
305 305 (extinct changeset are excluded but marker are pushed)
306 306
307 307 $ hg pull ../tmpb
308 308 pulling from ../tmpb
309 309 requesting all changes
310 310 adding changesets
311 311 adding manifests
312 312 adding file changes
313 313 added 4 changesets with 4 changes to 4 files (+1 heads)
314 314 5 new obsolescence markers
315 315 (run 'hg heads' to see heads, 'hg merge' to merge)
316 316 $ hg debugobsolete
317 317 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
318 318 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
319 319 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
320 320 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
321 321 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
322 322
323 323 Rollback//Transaction support
324 324
325 325 $ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
326 326 $ hg debugobsolete
327 327 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
328 328 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
329 329 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
330 330 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
331 331 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
332 332 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 (Thu Jan 01 00:22:20 1970 +0000) {'user': 'test'}
333 333 $ hg rollback -n
334 334 repository tip rolled back to revision 3 (undo debugobsolete)
335 335 $ hg rollback
336 336 repository tip rolled back to revision 3 (undo debugobsolete)
337 337 $ hg debugobsolete
338 338 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
339 339 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
340 340 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
341 341 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
342 342 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
343 343
344 344 $ cd ..
345 345
346 346 Try to push markers
347 347
348 348 $ hg init tmpd
349 349 $ hg -R tmpb push tmpd
350 350 pushing to tmpd
351 351 searching for changes
352 352 adding changesets
353 353 adding manifests
354 354 adding file changes
355 355 added 4 changesets with 4 changes to 4 files (+1 heads)
356 356 5 new obsolescence markers
357 357 $ hg -R tmpd debugobsolete | sort
358 358 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
359 359 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
360 360 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
361 361 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
362 362 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
363 363
364 364 Check obsolete keys are exchanged only if source has an obsolete store
365 365
366 366 $ hg init empty
367 367 $ hg --config extensions.debugkeys=debugkeys.py -R empty push tmpd
368 368 pushing to tmpd
369 369 listkeys phases
370 370 listkeys bookmarks
371 371 no changes found
372 372 listkeys phases
373 373 [1]
374 374
375 375 clone support
376 376 (markers are copied and extinct changesets are included to allow hardlinks)
377 377
378 378 $ hg clone tmpb clone-dest
379 379 updating to branch default
380 380 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
381 381 $ hg -R clone-dest log -G --hidden
382 382 @ 6:6f9641995072 (draft) [tip ] add n3w_3_c
383 383 |
384 384 | x 5:5601fb93a350 (draft) [ ] add new_3_c
385 385 |/
386 386 | x 4:ca819180edb9 (draft) [ ] add new_2_c
387 387 |/
388 388 | x 3:cdbce2fbb163 (draft) [ ] add new_c
389 389 |/
390 390 | o 2:245bde4270cd (public) [ ] add original_c
391 391 |/
392 392 o 1:7c3bad9141dc (public) [ ] add b
393 393 |
394 394 o 0:1f0dee641bb7 (public) [ ] add a
395 395
396 396 $ hg -R clone-dest debugobsolete
397 397 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
398 398 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
399 399 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
400 400 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
401 401 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
402 402
403 403
404 404 Destination repo have existing data
405 405 ---------------------------------------
406 406
407 407 On pull
408 408
409 409 $ hg init tmpe
410 410 $ cd tmpe
411 411 $ hg debugobsolete -d '1339 0' 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00
412 412 $ hg pull ../tmpb
413 413 pulling from ../tmpb
414 414 requesting all changes
415 415 adding changesets
416 416 adding manifests
417 417 adding file changes
418 418 added 4 changesets with 4 changes to 4 files (+1 heads)
419 419 5 new obsolescence markers
420 420 (run 'hg heads' to see heads, 'hg merge' to merge)
421 421 $ hg debugobsolete
422 422 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
423 423 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
424 424 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
425 425 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
426 426 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
427 427 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
428 428
429 429
430 430 On push
431 431
432 432 $ hg push ../tmpc
433 433 pushing to ../tmpc
434 434 searching for changes
435 435 no changes found
436 436 1 new obsolescence markers
437 437 [1]
438 438 $ hg -R ../tmpc debugobsolete
439 439 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
440 440 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
441 441 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
442 442 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
443 443 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
444 444 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
445 445
446 446 detect outgoing obsolete and unstable
447 447 ---------------------------------------
448 448
449 449
450 450 $ hg log -G
451 451 o 3:6f9641995072 (draft) [tip ] add n3w_3_c
452 452 |
453 453 | o 2:245bde4270cd (public) [ ] add original_c
454 454 |/
455 455 o 1:7c3bad9141dc (public) [ ] add b
456 456 |
457 457 o 0:1f0dee641bb7 (public) [ ] add a
458 458
459 459 $ hg up 'desc("n3w_3_c")'
460 460 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
461 461 $ mkcommit original_d
462 462 $ mkcommit original_e
463 463 $ hg debugobsolete --record-parents `getid original_d` -d '0 0'
464 464 $ hg debugobsolete | grep `getid original_d`
465 465 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
466 466 $ hg log -r 'obsolete()'
467 467 4:94b33453f93b (draft) [ ] add original_d
468 468 $ hg log -G -r '::unstable()'
469 469 @ 5:cda648ca50f5 (draft) [tip ] add original_e
470 470 |
471 471 x 4:94b33453f93b (draft) [ ] add original_d
472 472 |
473 473 o 3:6f9641995072 (draft) [ ] add n3w_3_c
474 474 |
475 475 o 1:7c3bad9141dc (public) [ ] add b
476 476 |
477 477 o 0:1f0dee641bb7 (public) [ ] add a
478 478
479 479
480 480 refuse to push obsolete changeset
481 481
482 482 $ hg push ../tmpc/ -r 'desc("original_d")'
483 483 pushing to ../tmpc/
484 484 searching for changes
485 485 abort: push includes obsolete changeset: 94b33453f93b!
486 486 [255]
487 487
488 488 refuse to push unstable changeset
489 489
490 490 $ hg push ../tmpc/
491 491 pushing to ../tmpc/
492 492 searching for changes
493 493 abort: push includes unstable changeset: cda648ca50f5!
494 494 [255]
495 495
496 496 Test that extinct changeset are properly detected
497 497
498 498 $ hg log -r 'extinct()'
499 499
500 500 Don't try to push extinct changeset
501 501
502 502 $ hg init ../tmpf
503 503 $ hg out ../tmpf
504 504 comparing with ../tmpf
505 505 searching for changes
506 506 0:1f0dee641bb7 (public) [ ] add a
507 507 1:7c3bad9141dc (public) [ ] add b
508 508 2:245bde4270cd (public) [ ] add original_c
509 509 3:6f9641995072 (draft) [ ] add n3w_3_c
510 510 4:94b33453f93b (draft) [ ] add original_d
511 511 5:cda648ca50f5 (draft) [tip ] add original_e
512 512 $ hg push ../tmpf -f # -f because be push unstable too
513 513 pushing to ../tmpf
514 514 searching for changes
515 515 adding changesets
516 516 adding manifests
517 517 adding file changes
518 518 added 6 changesets with 6 changes to 6 files (+1 heads)
519 519 7 new obsolescence markers
520 520
521 521 no warning displayed
522 522
523 523 $ hg push ../tmpf
524 524 pushing to ../tmpf
525 525 searching for changes
526 526 no changes found
527 527 [1]
528 528
529 529 Do not warn about new head when the new head is a successors of a remote one
530 530
531 531 $ hg log -G
532 532 @ 5:cda648ca50f5 (draft) [tip ] add original_e
533 533 |
534 534 x 4:94b33453f93b (draft) [ ] add original_d
535 535 |
536 536 o 3:6f9641995072 (draft) [ ] add n3w_3_c
537 537 |
538 538 | o 2:245bde4270cd (public) [ ] add original_c
539 539 |/
540 540 o 1:7c3bad9141dc (public) [ ] add b
541 541 |
542 542 o 0:1f0dee641bb7 (public) [ ] add a
543 543
544 544 $ hg up -q 'desc(n3w_3_c)'
545 545 $ mkcommit obsolete_e
546 546 created new head
547 547 $ hg debugobsolete `getid 'original_e'` `getid 'obsolete_e'`
548 548 $ hg outgoing ../tmpf # parasite hg outgoing testin
549 549 comparing with ../tmpf
550 550 searching for changes
551 551 6:3de5eca88c00 (draft) [tip ] add obsolete_e
552 552 $ hg push ../tmpf
553 553 pushing to ../tmpf
554 554 searching for changes
555 555 adding changesets
556 556 adding manifests
557 557 adding file changes
558 558 added 1 changesets with 1 changes to 1 files (+1 heads)
559 559 1 new obsolescence markers
560 560
561 561 test relevance computation
562 562 ---------------------------------------
563 563
564 564 Checking simple case of "marker relevance".
565 565
566 566
567 567 Reminder of the repo situation
568 568
569 569 $ hg log --hidden --graph
570 570 @ 6:3de5eca88c00 (draft) [tip ] add obsolete_e
571 571 |
572 572 | x 5:cda648ca50f5 (draft) [ ] add original_e
573 573 | |
574 574 | x 4:94b33453f93b (draft) [ ] add original_d
575 575 |/
576 576 o 3:6f9641995072 (draft) [ ] add n3w_3_c
577 577 |
578 578 | o 2:245bde4270cd (public) [ ] add original_c
579 579 |/
580 580 o 1:7c3bad9141dc (public) [ ] add b
581 581 |
582 582 o 0:1f0dee641bb7 (public) [ ] add a
583 583
584 584
585 585 List of all markers
586 586
587 587 $ hg debugobsolete
588 588 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
589 589 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
590 590 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
591 591 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
592 592 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
593 593 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
594 594 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
595 595 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
596 596
597 597 List of changesets with no chain
598 598
599 599 $ hg debugobsolete --hidden --rev ::2
600 600
601 601 List of changesets that are included on marker chain
602 602
603 603 $ hg debugobsolete --hidden --rev 6
604 604 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
605 605
606 606 List of changesets with a longer chain, (including a pruned children)
607 607
608 608 $ hg debugobsolete --hidden --rev 3
609 609 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
610 610 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
611 611 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
612 612 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
613 613 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
614 614 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
615 615 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
616 616
617 617 List of both
618 618
619 619 $ hg debugobsolete --hidden --rev 3::6
620 620 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
621 621 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
622 622 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
623 623 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
624 624 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
625 625 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
626 626 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
627 627 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
628 628
629 629 #if serve
630 630
631 631 Test the debug output for exchange
632 632 ----------------------------------
633 633
634 634 $ hg pull ../tmpb --config 'experimental.obsmarkers-exchange-debug=True' --config 'experimental.bundle2-exp=True'
635 635 pulling from ../tmpb
636 636 searching for changes
637 637 no changes found
638 638 obsmarker-exchange: 346 bytes received
639 639
640 640 check hgweb does not explode
641 641 ====================================
642 642
643 643 $ hg unbundle $TESTDIR/bundles/hgweb+obs.hg
644 644 adding changesets
645 645 adding manifests
646 646 adding file changes
647 647 added 62 changesets with 63 changes to 9 files (+60 heads)
648 648 (run 'hg heads .' to see heads, 'hg merge' to merge)
649 649 $ for node in `hg log -r 'desc(babar_)' --template '{node}\n'`;
650 650 > do
651 651 > hg debugobsolete $node
652 652 > done
653 653 $ hg up tip
654 654 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
655 655
656 656 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
657 657 $ cat hg.pid >> $DAEMON_PIDS
658 658
659 659 check changelog view
660 660
661 661 $ get-with-headers.py --headeronly localhost:$HGPORT 'shortlog/'
662 662 200 Script output follows
663 663
664 664 check graph view
665 665
666 666 $ get-with-headers.py --headeronly localhost:$HGPORT 'graph'
667 667 200 Script output follows
668 668
669 669 check filelog view
670 670
671 671 $ get-with-headers.py --headeronly localhost:$HGPORT 'log/'`hg log -r . -T "{node}"`/'babar'
672 672 200 Script output follows
673 673
674 674 $ get-with-headers.py --headeronly localhost:$HGPORT 'rev/68'
675 675 200 Script output follows
676 676 $ get-with-headers.py --headeronly localhost:$HGPORT 'rev/67'
677 677 404 Not Found
678 678 [1]
679 679
680 680 check that web.view config option:
681 681
682 682 $ killdaemons.py hg.pid
683 683 $ cat >> .hg/hgrc << EOF
684 684 > [web]
685 685 > view=all
686 686 > EOF
687 687 $ wait
688 688 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
689 689 $ get-with-headers.py --headeronly localhost:$HGPORT 'rev/67'
690 690 200 Script output follows
691 691 $ killdaemons.py hg.pid
692 692
693 693 Checking _enable=False warning if obsolete marker exists
694 694
695 695 $ echo '[experimental]' >> $HGRCPATH
696 696 $ echo "evolution=" >> $HGRCPATH
697 697 $ hg log -r tip
698 698 obsolete feature not enabled but 68 markers found!
699 699 68:c15e9edfca13 (draft) [tip ] add celestine
700 700
701 701 reenable for later test
702 702
703 703 $ echo '[experimental]' >> $HGRCPATH
704 704 $ echo "evolution=createmarkers,exchange" >> $HGRCPATH
705 705
706 706 #endif
707 707
708 708 Test incoming/outcoming with changesets obsoleted remotely, known locally
709 709 ===============================================================================
710 710
711 711 This test issue 3805
712 712
713 713 $ hg init repo-issue3805
714 714 $ cd repo-issue3805
715 715 $ echo "base" > base
716 716 $ hg ci -Am "base"
717 717 adding base
718 718 $ echo "foo" > foo
719 719 $ hg ci -Am "A"
720 720 adding foo
721 721 $ hg clone . ../other-issue3805
722 722 updating to branch default
723 723 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
724 724 $ echo "bar" >> foo
725 725 $ hg ci --amend
726 726 $ cd ../other-issue3805
727 727 $ hg log -G
728 728 @ 1:29f0c6921ddd (draft) [tip ] A
729 729 |
730 730 o 0:d20a80d4def3 (draft) [ ] base
731 731
732 732 $ hg log -G -R ../repo-issue3805
733 733 @ 3:323a9c3ddd91 (draft) [tip ] A
734 734 |
735 735 o 0:d20a80d4def3 (draft) [ ] base
736 736
737 737 $ hg incoming
738 738 comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
739 739 searching for changes
740 740 3:323a9c3ddd91 (draft) [tip ] A
741 741 $ hg incoming --bundle ../issue3805.hg
742 742 comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
743 743 searching for changes
744 744 3:323a9c3ddd91 (draft) [tip ] A
745 745 $ hg outgoing
746 746 comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
747 747 searching for changes
748 no changes found
749 [1]
748 1:29f0c6921ddd (draft) [tip ] A
750 749
751 750 #if serve
752 751
753 752 $ hg serve -R ../repo-issue3805 -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
754 753 $ cat hg.pid >> $DAEMON_PIDS
755 754
756 755 $ hg incoming http://localhost:$HGPORT
757 756 comparing with http://localhost:$HGPORT/
758 757 searching for changes
759 758 2:323a9c3ddd91 (draft) [tip ] A
760 759 $ hg outgoing http://localhost:$HGPORT
761 760 comparing with http://localhost:$HGPORT/
762 761 searching for changes
763 no changes found
764 [1]
762 1:29f0c6921ddd (draft) [tip ] A
765 763
766 764 $ killdaemons.py
767 765
768 766 #endif
769 767
770 768 This test issue 3814
771 769
772 770 (nothing to push but locally hidden changeset)
773 771
774 772 $ cd ..
775 773 $ hg init repo-issue3814
776 774 $ cd repo-issue3805
777 775 $ hg push -r 323a9c3ddd91 ../repo-issue3814
778 776 pushing to ../repo-issue3814
779 777 searching for changes
780 778 adding changesets
781 779 adding manifests
782 780 adding file changes
783 781 added 2 changesets with 2 changes to 2 files
784 782 2 new obsolescence markers
785 783 $ hg out ../repo-issue3814
786 784 comparing with ../repo-issue3814
787 785 searching for changes
788 786 no changes found
789 787 [1]
790 788
791 789 Test that a local tag blocks a changeset from being hidden
792 790
793 791 $ hg tag -l visible -r 1 --hidden
794 792 $ hg log -G
795 793 @ 3:323a9c3ddd91 (draft) [tip ] A
796 794 |
797 795 | x 1:29f0c6921ddd (draft) [visible ] A
798 796 |/
799 797 o 0:d20a80d4def3 (draft) [ ] base
800 798
801 799 Test that removing a local tag does not cause some commands to fail
802 800
803 801 $ hg tag -l -r tip tiptag
804 802 $ hg tags
805 803 tiptag 3:323a9c3ddd91
806 804 tip 3:323a9c3ddd91
807 805 visible 1:29f0c6921ddd
808 806 $ hg --config extensions.strip= strip -r tip --no-backup
809 807 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
810 808 $ hg tags
811 809 visible 1:29f0c6921ddd
812 810 tip 1:29f0c6921ddd
813 811
814 812 Test bundle overlay onto hidden revision
815 813
816 814 $ cd ..
817 815 $ hg init repo-bundleoverlay
818 816 $ cd repo-bundleoverlay
819 817 $ echo "A" > foo
820 818 $ hg ci -Am "A"
821 819 adding foo
822 820 $ echo "B" >> foo
823 821 $ hg ci -m "B"
824 822 $ hg up 0
825 823 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
826 824 $ echo "C" >> foo
827 825 $ hg ci -m "C"
828 826 created new head
829 827 $ hg log -G
830 828 @ 2:c186d7714947 (draft) [tip ] C
831 829 |
832 830 | o 1:44526ebb0f98 (draft) [ ] B
833 831 |/
834 832 o 0:4b34ecfb0d56 (draft) [ ] A
835 833
836 834
837 835 $ hg clone -r1 . ../other-bundleoverlay
838 836 adding changesets
839 837 adding manifests
840 838 adding file changes
841 839 added 2 changesets with 2 changes to 1 files
842 840 updating to branch default
843 841 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
844 842 $ cd ../other-bundleoverlay
845 843 $ echo "B+" >> foo
846 844 $ hg ci --amend -m "B+"
847 845 $ hg log -G --hidden
848 846 @ 3:b7d587542d40 (draft) [tip ] B+
849 847 |
850 848 | x 2:eb95e9297e18 (draft) [ ] temporary amend commit for 44526ebb0f98
851 849 | |
852 850 | x 1:44526ebb0f98 (draft) [ ] B
853 851 |/
854 852 o 0:4b34ecfb0d56 (draft) [ ] A
855 853
856 854
857 855 $ hg incoming ../repo-bundleoverlay --bundle ../bundleoverlay.hg
858 856 comparing with ../repo-bundleoverlay
859 857 searching for changes
860 858 1:44526ebb0f98 (draft) [ ] B
861 859 2:c186d7714947 (draft) [tip ] C
862 860 $ hg log -G -R ../bundleoverlay.hg
863 861 o 4:c186d7714947 (draft) [tip ] C
864 862 |
865 863 | @ 3:b7d587542d40 (draft) [ ] B+
866 864 |/
867 865 o 0:4b34ecfb0d56 (draft) [ ] A
868 866
869 867
870 868 #if serve
871 869
872 870 Test issue 4506
873 871
874 872 $ cd ..
875 873 $ hg init repo-issue4506
876 874 $ cd repo-issue4506
877 875 $ echo "0" > foo
878 876 $ hg add foo
879 877 $ hg ci -m "content-0"
880 878
881 879 $ hg up null
882 880 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
883 881 $ echo "1" > bar
884 882 $ hg add bar
885 883 $ hg ci -m "content-1"
886 884 created new head
887 885 $ hg up 0
888 886 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
889 887 $ hg graft 1
890 888 grafting 1:1c9eddb02162 "content-1" (tip)
891 889
892 890 $ hg debugobsolete `hg log -r1 -T'{node}'` `hg log -r2 -T'{node}'`
893 891
894 892 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
895 893 $ cat hg.pid >> $DAEMON_PIDS
896 894
897 895 $ get-with-headers.py --headeronly localhost:$HGPORT 'rev/1'
898 896 404 Not Found
899 897 [1]
900 898 $ get-with-headers.py --headeronly localhost:$HGPORT 'file/tip/bar'
901 899 200 Script output follows
902 900 $ get-with-headers.py --headeronly localhost:$HGPORT 'annotate/tip/bar'
903 901 200 Script output follows
904 902
905 903 $ killdaemons.py
906 904
907 905 #endif
908 906
909 907 Test heads computation on pending index changes with obsolescence markers
910 908 $ cd ..
911 909 $ cat >$TESTTMP/test_extension.py << EOF
912 910 > from mercurial import cmdutil
913 911 > from mercurial.i18n import _
914 912 >
915 913 > cmdtable = {}
916 914 > command = cmdutil.command(cmdtable)
917 915 > @command("amendtransient",[], _('hg amendtransient [rev]'))
918 916 > def amend(ui, repo, *pats, **opts):
919 917 > def commitfunc(ui, repo, message, match, opts):
920 918 > return repo.commit(message, repo['.'].user(), repo['.'].date(), match)
921 919 > opts['message'] = 'Test'
922 920 > opts['logfile'] = None
923 921 > cmdutil.amend(ui, repo, commitfunc, repo['.'], {}, pats, opts)
924 922 > print repo.changelog.headrevs()
925 923 > EOF
926 924 $ cat >> $HGRCPATH << EOF
927 925 > [extensions]
928 926 > testextension=$TESTTMP/test_extension.py
929 927 > EOF
930 928 $ hg init repo-issue-nativerevs-pending-changes
931 929 $ cd repo-issue-nativerevs-pending-changes
932 930 $ mkcommit a
933 931 $ mkcommit b
934 932 $ hg up ".^"
935 933 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
936 934 $ echo aa > a
937 935 $ hg amendtransient
938 936 [1, 3]
939 937
940 938 Test cache consistency for the visible filter
941 939 1) We want to make sure that the cached filtered revs are invalidated when
942 940 bookmarks change
943 941 $ cd ..
944 942 $ cat >$TESTTMP/test_extension.py << EOF
945 943 > from mercurial import cmdutil, extensions, bookmarks, repoview
946 944 > def _bookmarkchanged(orig, bkmstoreinst, *args, **kwargs):
947 945 > repo = bkmstoreinst._repo
948 946 > ret = orig(bkmstoreinst, *args, **kwargs)
949 947 > hidden1 = repoview.computehidden(repo)
950 948 > hidden = repoview.filterrevs(repo, 'visible')
951 949 > if sorted(hidden1) != sorted(hidden):
952 950 > print "cache inconsistency"
953 951 > return ret
954 952 > def extsetup(ui):
955 953 > extensions.wrapfunction(bookmarks.bmstore, 'write', _bookmarkchanged)
956 954 > EOF
957 955
958 956 $ hg init repo-cache-inconsistency
959 957 $ cd repo-issue-nativerevs-pending-changes
960 958 $ mkcommit a
961 959 a already tracked!
962 960 $ mkcommit b
963 961 $ hg id
964 962 13bedc178fce tip
965 963 $ echo "hello" > b
966 964 $ hg commit --amend -m "message"
967 965 $ hg book bookb -r 13bedc178fce --hidden
968 966 $ hg log -r 13bedc178fce
969 967 5:13bedc178fce (draft) [ bookb] add b
970 968 $ hg book -d bookb
971 969 $ hg log -r 13bedc178fce
972 970 abort: hidden revision '13bedc178fce'!
973 971 (use --hidden to access hidden revisions)
974 972 [255]
975 973
976 974
977 975
General Comments 0
You need to be logged in to leave comments. Login now