##// END OF EJS Templates
localrepo: back out changeset b08af8f0ac01...
Pierre-Yves David -
r24234:7977d35d default
parent child Browse files
Show More
@@ -1,1855 +1,1856
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        # always read the cached value off the unfiltered repo so every
        # repoview shares one cache entry
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        # writes likewise target the unfiltered repo
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        # invalidation targets the unfiltered repo too
        return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # resolve the cached file relative to .hg/store instead of .hg/
        return obj.sjoin(fname)
40 40
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # accessed on the unfiltered repo itself: compute and cache there
            return super(unfilteredpropertycache, self).__get__(unfi)
        # accessed through a repoview: delegate to the unfiltered repo's
        # attribute so only one cached value ever exists
        return getattr(unfi, self.name)
49 49
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # store the value on the (possibly filtered) instance itself,
        # bypassing any __set__ redirection to the unfiltered repo
        object.__setattr__(obj, self.name, value)
55 55
56 56
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
60 60
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # swap the (possibly filtered) receiver for its unfiltered twin
        # before invoking the wrapped method
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
66 66
# capability sets advertised by local peers: modern peers expose only the
# current wire protocol primitives; legacy peers additionally accept the
# pre-1.9 changegroupsubset call (used by restricted-capability tests)
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        # NOTE(review): caps defaults to the shared module-level set; it is
        # never mutated here (_restrictcapabilities copies before adding),
        # but callers passing their own set should not rely on it being
        # copied unconditionally.
        peer.peerrepository.__init__(self)
        # peers always talk to the 'served' view, never the raw repo
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # a local peer can hand out the underlying repo object directly
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        """Return a bundle of the requested changesets.

        Returns a raw stream for bundle1 requests, or an unbundle20
        object when the caller advertised bundle2 support.
        """
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2Y' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            # translate a push race into the error the wire protocol reports
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
155 155
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # same as localpeer but advertises the legacy capability set
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
174 174
175 175 class localrepository(object):
176 176
    # on-disk format markers this class understands; supportedformats is
    # also advertised to peers, _basesupported adds purely-local flags
    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # requirements that are forwarded to the revlog opener as options
    openerreqs = set(('revlogv1', 'generaldelta'))
    # requirements written for newly created repositories
    requirements = ['revlogv1']
    # name of the repoview filter applied to this instance (None = unfiltered)
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        # return a private copy so callers can append without mutating
        # the class-level default
        return self.requirements[:]
190 190
    def __init__(self, baseui, path=None, create=False):
        """Open (or, with create=True, initialize) the repository at *path*.

        Raises error.RepoError if the repo is missing (create=False) or
        already exists (create=True).
        """
        # working directory vfs and derived paths
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            # load per-repo config and any extensions it enables; a missing
            # hgrc is fine
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only run setup functions registered by enabled extensions
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                # initialize a brand new repository layout
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                # existing repo: read .hg/requires, rejecting unsupported ones
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # very old repos have no requires file at all
                requirements = set()

        self.sharedpath = self.path
        try:
            # shared repos point their store at another repository's .hg
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
303 303
    def close(self):
        # nothing to release for a plain local repository; subclasses and
        # peers may override
        pass

    def _restrictcapabilities(self, caps):
        """Filter the capability set advertised to peers."""
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps
315 315
316 316 def _applyrequirements(self, requirements):
317 317 self.requirements = requirements
318 318 self.svfs.options = dict((r, 1) for r in requirements
319 319 if r in self.openerreqs)
320 320 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
321 321 if chunkcachesize is not None:
322 322 self.svfs.options['chunkcachesize'] = chunkcachesize
323 323 maxchainlen = self.ui.configint('format', 'maxchainlen')
324 324 if maxchainlen is not None:
325 325 self.svfs.options['maxchainlen'] = maxchainlen
326 326 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
327 327 if manifestcachesize is not None:
328 328 self.svfs.options['manifestcachesize'] = manifestcachesize
329 329
330 330 def _writerequirements(self):
331 331 reqfile = self.vfs("requires", "w")
332 332 for r in sorted(self.requirements):
333 333 reqfile.write("%s\n" % r)
334 334 reqfile.close()
335 335
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # the path itself is a registered subrepo: legal
                    return True
                else:
                    # path lives inside a subrepo; delegate the check to it
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # try the next shorter ancestor prefix
                parts.pop()
        return False
373 373
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
390 390
    @repofilecache('bookmarks')
    def _bookmarks(self):
        # bookmark store, cached and invalidated on .hg/bookmarks changes
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # name of the active bookmark, or None
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        """Return the nodes of all divergent variants of *bookmark*.

        'name@suffix' bookmarks count as variants of 'name'.
        """
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        # phase information, cached and invalidated on .hg/store/phaseroots
        return phases.phasecache(self, self._phasedefaults)
410 410
    @storecache('obsstore')
    def obsstore(self):
        """Obsolescence-marker store (read-only unless createmarkers is on)."""
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store
427 427
    @storecache('00changelog.i')
    def changelog(self):
        """The changelog revlog, honoring pending data during hooks."""
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            # a transaction is in flight in this repo: expose the
            # not-yet-committed revisions to hook processes
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        # the manifest revlog
        return manifest.manifest(self.svfs)

    @repofilecache('dirstate')
    def dirstate(self):
        """The dirstate, with unknown working parents downgraded to nullid."""
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                # warn only once per dirstate instance
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
456 456
    def __getitem__(self, changeid):
        """Return the context for *changeid*.

        None yields the working-directory context; a slice yields a list
        of changectxs, skipping filtered revisions.
        """
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        """True if *changeid* resolves to a changeset in this repository."""
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    def __len__(self):
        # number of revisions in the changelog
        return len(self.changelog)

    def __iter__(self):
        # iterate over revision numbers
        return iter(self.changelog)
480 480
    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        # args are interpolated into expr via revset.formatspec placeholders
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        # local repositories are addressed with the file: scheme
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
506 506
507 @unfilteredmethod
507 508 def _tag(self, names, node, message, local, user, date, extra={},
508 509 editor=False):
509 510 if isinstance(names, str):
510 511 names = (names,)
511 512
512 513 branches = self.branchmap()
513 514 for name in names:
514 515 self.hook('pretag', throw=True, node=hex(node), tag=name,
515 516 local=local)
516 517 if name in branches:
517 518 self.ui.warn(_("warning: tag %s conflicts with existing"
518 519 " branch name\n") % name)
519 520
520 521 def writetags(fp, names, munge, prevtags):
521 522 fp.seek(0, 2)
522 523 if prevtags and prevtags[-1] != '\n':
523 524 fp.write('\n')
524 525 for name in names:
525 526 m = munge and munge(name) or name
526 527 if (self._tagscache.tagtypes and
527 528 name in self._tagscache.tagtypes):
528 529 old = self.tags().get(name, nullid)
529 530 fp.write('%s %s\n' % (hex(old), m))
530 531 fp.write('%s %s\n' % (hex(node), m))
531 532 fp.close()
532 533
533 534 prevtags = ''
534 535 if local:
535 536 try:
536 537 fp = self.vfs('localtags', 'r+')
537 538 except IOError:
538 539 fp = self.vfs('localtags', 'a')
539 540 else:
540 541 prevtags = fp.read()
541 542
542 543 # local tags are stored in the current charset
543 544 writetags(fp, names, None, prevtags)
544 545 for name in names:
545 546 self.hook('tag', node=hex(node), tag=name, local=local)
546 547 return
547 548
548 549 try:
549 550 fp = self.wfile('.hgtags', 'rb+')
550 551 except IOError, e:
551 552 if e.errno != errno.ENOENT:
552 553 raise
553 554 fp = self.wfile('.hgtags', 'ab')
554 555 else:
555 556 prevtags = fp.read()
556 557
557 558 # committed tags are stored in UTF-8
558 559 writetags(fp, names, encoding.fromlocal, prevtags)
559 560
560 561 fp.close()
561 562
562 563 self.invalidatecaches()
563 564
564 565 if '.hgtags' not in self.dirstate:
565 566 self[None].add(['.hgtags'])
566 567
567 568 m = matchmod.exact(self.root, '', ['.hgtags'])
568 569 tagnode = self.commit(message, user, date, extra=extra, match=m,
569 570 editor=editor)
570 571
571 572 for name in names:
572 573 self.hook('tag', node=hex(node), tag=name, local=local)
573 574
574 575 return tagnode
575 576
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to fold an unrelated .hgtags modification into the
            # tag commit
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
605 606
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by tagslist()/nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
628 629
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # a filtered view may hide heads the cached tags were computed
            # from; recompute instead of trusting _tagscache
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
644 645
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            # tags pointing at nullid mark deletion; drop them
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
677 678
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            # build and memoize the (tag, node) list sorted by revision
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # build and memoize the reverse (node -> sorted tag names) map
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
709 710
710 711 def nodebookmarks(self, node):
711 712 marks = []
712 713 for bookmark, n in self._bookmarks.iteritems():
713 714 if n == node:
714 715 marks.append(bookmark)
715 716 return sorted(marks)
716 717
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # refresh/populate the per-filter cache, then serve from it
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
738 739
    def lookup(self, key):
        # resolve any changeid (rev number, node, tag, bookmark, ...) to a node
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        """Resolve *key* to a branch name, preferring exact branch matches."""
        repo = remote or self
        if key in repo.branchmap():
            return key

        # not a branch name: resolve key as a changeset and use its branch
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        """Return a list of booleans: is each node known and non-secret?"""
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            # unknown nodes and secret changesets are reported as unknown
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result
759 760
    def local(self):
        # this repository is local; peers override this to return None
        return self

    def cancopy(self):
        """True if this repo may be cloned by direct file copy."""
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None
777 778
    def join(self, f, *insidef):
        # path of f (joined with any insidef components) under .hg/
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        # path of f under the working directory root
        return self.vfs.reljoin(self.root, f, *insidef)
783 784
784 785 def file(self, f):
785 786 if f[0] == '/':
786 787 f = f[1:]
787 788 return filelog.filelog(self.svfs, f)
788 789
    def changectx(self, changeid):
        # alias for __getitem__, kept for API compatibility
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
795 796
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents, preserving valid copy records."""
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # leaving a merge: drop copy records whose endpoints are not
            # in the remaining parent
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
812 813
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        # current directory, relative to the repo root
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        # render repo-relative path f relative to cwd
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        # open file f in the working directory
        return self.wvfs(f, mode)

    def _link(self, f):
        # is working-directory file f a symlink?
        return self.wvfs.islink(f)
829 830
    def _loadfilter(self, filter):
        """Load and cache the (matcher, fn, params) list for a filter section.

        *filter* is a config section name ('encode' or 'decode'); each
        entry maps a file pattern to either a registered data filter or a
        shell command run through util.filter.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    # registered filters are selected by command prefix;
                    # the remainder of the command becomes the parameters
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to running cmd as a shell pipe
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    # bind the current fn as a default arg on purpose: a
                    # plain closure would late-bind to the loop variable
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
853 854
    def _filter(self, filterpats, filename, data):
        """Run *data* through the first filter whose pattern matches."""
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                # only the first matching filter applies
                break

        return data
862 863
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # filters applied when reading working-dir data into the repo
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        # filters applied when writing repo data to the working dir
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        # register a named data filter usable from [encode]/[decode] config
        self._datafilters[name] = filter
873 874
    def wread(self, filename):
        """Read a working-directory file, applying encode filters.

        Symlinks are read as their target string.
        """
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """Write *data* to the working directory, applying decode filters.

        flags: 'l' writes a symlink, 'x' marks the file executable.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        # apply decode filters without touching the filesystem
        return self._filter(self._decodefilterpats, filename, data)
892 893
893 894 def currenttransaction(self):
894 895 """return the current transaction or None if non exists"""
895 896 tr = self._transref and self._transref() or None
896 897 if tr and tr.running():
897 898 return tr
898 899 return None
899 900
    def transaction(self, desc, report=None):
        """Open a new transaction on the store (or nest into a running one).

        desc is recorded in the journal description; report, if given,
        replaces ui.warn as the rollback reporting function.
        """
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # snapshot non-store state before the transaction starts
        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode)
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        self._transref = weakref.ref(tr)
        return tr
926 927
    def _journalfiles(self):
        # (vfs, name) pairs for every file belonging to a transaction
        # journal; store-level files live on svfs, the rest on the plain vfs.
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))
934 935
    def undofiles(self):
        # Same files as _journalfiles(), but under their post-transaction
        # 'undo' names.
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
937 938
    def _writejournal(self, desc):
        # Snapshot the non-store state (dirstate, branch, bookmarks,
        # phaseroots) so a rollback can restore it; 'journal.desc' records
        # the current repo length and desc for the rollback message.
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
949 950
    def recover(self):
        """Roll back an interrupted transaction's journal, if any.

        Returns True when a journal was found and rolled back, False
        otherwise.
        """
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                # files were restored on disk behind the caches' back
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
966 967
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction if 'undo' files exist.

        Returns 0 on success (or dry run), 1 when there is nothing to
        roll back.
        """
        wlock = lock = None
        try:
            # wlock before lock, matching the usual acquisition order
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
979 980
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Restore the repository to the state saved in the 'undo' files.

        With dryrun, only report what would happen. Without force,
        refuse to roll back a commit while the working directory parent
        is not the tip, since that could lose data.
        """
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # no undo.desc; roll back anyway with a generic message
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # only restore the dirstate when the rollback stripped the current
        # working directory parents out of the changelog
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1046 1047
1047 1048 def invalidatecaches(self):
1048 1049
1049 1050 if '_tagscache' in vars(self):
1050 1051 # can't use delattr on proxy
1051 1052 del self.__dict__['_tagscache']
1052 1053
1053 1054 self.unfiltered()._branchcaches.clear()
1054 1055 self.invalidatevolatilesets()
1055 1056
    def invalidatevolatilesets(self):
        # Filtered-view caches and obsolescence caches are derived from
        # volatile sets; clear them together.
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1059 1060
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    # drop per-file cache entries so they get re-checked
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')
1076 1077
1077 1078 def invalidate(self):
1078 1079 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1079 1080 for k in self._filecache:
1080 1081 # dirstate is invalidated separately in invalidatedirstate()
1081 1082 if k == 'dirstate':
1082 1083 continue
1083 1084
1084 1085 try:
1085 1086 delattr(unfiltered, k)
1086 1087 except AttributeError:
1087 1088 pass
1088 1089 self.invalidatecaches()
1089 1090 self.store.invalidatecaches()
1090 1091
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        # invalidate() covers the store-backed caches,
        # invalidatedirstate() the working directory state
        self.invalidate()
        self.invalidatedirstate()
1097 1098
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        """Acquire lockname on vfs and return the lock object.

        Without wait, a held lock raises error.LockHeld immediately;
        with wait, retry using the configured ui.timeout (default 600s).
        releasefn runs on release; acquirefn, if given, runs after the
        lock is obtained. desc is used in user-facing messages.
        """
        try:
            # first attempt is non-blocking (timeout 0)
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l
1114 1115
1115 1116 def _afterlock(self, callback):
1116 1117 """add a callback to the current repository lock.
1117 1118
1118 1119 The callback will be executed on lock release."""
1119 1120 l = self._lockref and self._lockref()
1120 1121 if l:
1121 1122 l.postrelease.append(callback)
1122 1123 else:
1123 1124 callback()
1124 1125
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # re-enter the already-held lock instead of acquiring again
            l.lock()
            return l

        def unlock():
            # on release, refresh stat data of cached store files so the
            # filecache notices anything written while locked
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1144 1145
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-enter the already-held lock instead of acquiring again
            l.lock()
            return l

        def unlock():
            # on release, flush the dirstate unless a parent change is
            # pending, in which case the in-memory state is discarded
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
1167 1168
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: the file context being committed
        manifest1, manifest2: manifests of the two commit parents
        linkrev: revision the new filelog entry will link to
        tr: the running transaction
        changelist: list to which changed file names are appended

        Returns the new filelog node, or the reused parent node when the
        file is unchanged.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1248 1249
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing
        to commit.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # nothing to commit: no files, not a branch change, not a
            # branch closing -> bail out early
            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
1421 1422
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        With error=True, IOErrors while committing an individual file are
        re-raised even for missing files. Returns the new changeset node.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            # missing in the working context: record removal
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file change: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # move the new commit into its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase the result will be
                # compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1508 1509
1509 1510 @unfilteredmethod
1510 1511 def destroying(self):
1511 1512 '''Inform the repository that nodes are about to be destroyed.
1512 1513 Intended for use by strip and rollback, so there's a common
1513 1514 place for anything that has to be done before destroying history.
1514 1515
1515 1516 This is mostly useful for saving state that is in memory and waiting
1516 1517 to be flushed when the current lock is released. Because a call to
1517 1518 destroyed is imminent, the repo will be invalidated causing those
1518 1519 changes to stay in memory (waiting for the next unlock), or vanish
1519 1520 completely.
1520 1521 '''
1521 1522 # When using the same lock to commit and strip, the phasecache is left
1522 1523 # dirty after committing. Then when we strip, the repo is invalidated,
1523 1524 # causing those changes to disappear.
1524 1525 if '_phasecache' in vars(self):
1525 1526 self._phasecache.write()
1526 1527
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1560 1561
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        node=None walks the working directory context.
        '''
        return self[node].walk(match)
1568 1569
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)

        The result exposes .modified/.added/.removed/.deleted/.clean
        lists, as used by commit() above.'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)
1575 1576
1576 1577 def heads(self, start=None):
1577 1578 heads = self.changelog.heads(start)
1578 1579 # sort the output in rev descending order
1579 1580 return sorted(heads, key=self.changelog.rev, reverse=True)
1580 1581
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest; reverse so
        # the newest head comes first
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
1601 1602
1602 1603 def branches(self, nodes):
1603 1604 if not nodes:
1604 1605 nodes = [self.changelog.tip()]
1605 1606 b = []
1606 1607 for n in nodes:
1607 1608 t = n
1608 1609 while True:
1609 1610 p = self.changelog.parents(n)
1610 1611 if p[1] != nullid or p[0] == nullid:
1611 1612 b.append((t, n, p[0], p[1]))
1612 1613 break
1613 1614 n = p[0]
1614 1615 return b
1615 1616
1616 1617 def between(self, pairs):
1617 1618 r = []
1618 1619
1619 1620 for top, bottom in pairs:
1620 1621 n, l, i = top, [], 0
1621 1622 f = 1
1622 1623
1623 1624 while n != bottom and n != nullid:
1624 1625 p = self.changelog.parents(n)[0]
1625 1626 if i == f:
1626 1627 l.append(n)
1627 1628 f = f * 2
1628 1629 n = p
1629 1630 i += 1
1630 1631
1631 1632 r.append(l)
1632 1633
1633 1634 return r
1634 1635
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        # intentionally a no-op in core: the base repository accepts
        # every push
        pass
1641 1642
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()
1648 1649
    def stream_in(self, remote, requirements):
        """Populate this repo by copying raw store files from remote.

        Reads the response of the remote's 'stream_out' command: a status
        code, a "<files> <bytes>" header, then each file as
        "<name>\\0<size>" followed by its raw content. Returns
        len(self.heads()) + 1 so callers can treat the result like a
        pull that added changesets.
        """
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.svfs(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                # seed the local branch cache from the remote branchmap
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible
                    # filter above served are unlikely to be fetch from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
1758 1759
1759 1760 def clone(self, remote, heads=[], stream=None):
1760 1761 '''clone remote repository.
1761 1762
1762 1763 keyword arguments:
1763 1764 heads: list of revs to clone (forces use of pull)
1764 1765 stream: use streaming clone if possible'''
1765 1766
1766 1767 # now, all clients that can request uncompressed clones can
1767 1768 # read repo formats supported by all servers that can serve
1768 1769 # them.
1769 1770
1770 1771 # if revlog format changes, client will have to check version
1771 1772 # and format flags on "stream" capability, and use
1772 1773 # uncompressed only if compatible.
1773 1774
1774 1775 if stream is None:
1775 1776 # if the server explicitly prefers to stream (for fast LANs)
1776 1777 stream = remote.capable('stream-preferred')
1777 1778
1778 1779 if stream and not heads:
1779 1780 # 'stream' means remote revlog format is revlogv1 only
1780 1781 if remote.capable('stream'):
1781 1782 self.stream_in(remote, set(('revlogv1',)))
1782 1783 else:
1783 1784 # otherwise, 'streamreqs' contains the remote revlog format
1784 1785 streamreqs = remote.capable('streamreqs')
1785 1786 if streamreqs:
1786 1787 streamreqs = set(streamreqs.split(','))
1787 1788 # if we support it, stream in and adjust our requirements
1788 1789 if not streamreqs - self.supportedformats:
1789 1790 self.stream_in(remote, streamreqs)
1790 1791
1791 1792 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1792 1793 try:
1793 1794 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1794 1795 ret = exchange.pull(self, remote, heads).cgresult
1795 1796 finally:
1796 1797 self.ui.restoreconfig(quiet)
1797 1798 return ret
1798 1799
1799 1800 def pushkey(self, namespace, key, old, new):
1800 1801 try:
1801 1802 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1802 1803 old=old, new=new)
1803 1804 except error.HookAbort, exc:
1804 1805 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1805 1806 if exc.hint:
1806 1807 self.ui.write_err(_("(%s)\n") % exc.hint)
1807 1808 return False
1808 1809 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1809 1810 ret = pushkey.push(self, namespace, key, old, new)
1810 1811 def runhook():
1811 1812 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1812 1813 ret=ret)
1813 1814 self._afterlock(runhook)
1814 1815 return ret
1815 1816
1816 1817 def listkeys(self, namespace):
1817 1818 self.hook('prelistkeys', throw=True, namespace=namespace)
1818 1819 self.ui.debug('listing keys for "%s"\n' % namespace)
1819 1820 values = pushkey.list(self, namespace)
1820 1821 self.hook('listkeys', namespace=namespace, values=values)
1821 1822 return values
1822 1823
1823 1824 def debugwireargs(self, one, two, three=None, four=None, five=None):
1824 1825 '''used to test argument passing over the wire'''
1825 1826 return "%s %s %s %s %s" % (one, two, three, four, five)
1826 1827
1827 1828 def savecommitmessage(self, text):
1828 1829 fp = self.vfs('last-message.txt', 'wb')
1829 1830 try:
1830 1831 fp.write(text)
1831 1832 finally:
1832 1833 fp.close()
1833 1834 return self.pathto(fp.name[len(self.root) + 1:])
1834 1835
1835 1836 # used to avoid circular references so destructors work
1836 1837 def aftertrans(files):
1837 1838 renamefiles = [tuple(t) for t in files]
1838 1839 def a():
1839 1840 for vfs, src, dest in renamefiles:
1840 1841 try:
1841 1842 vfs.rename(src, dest)
1842 1843 except OSError: # journal file does not yet exist
1843 1844 pass
1844 1845 return a
1845 1846
1846 1847 def undoname(fn):
1847 1848 base, name = os.path.split(fn)
1848 1849 assert name.startswith('journal')
1849 1850 return os.path.join(base, name.replace('journal', 'undo', 1))
1850 1851
1851 1852 def instance(ui, path, create):
1852 1853 return localrepository(ui, util.urllocalpath(path), create)
1853 1854
1854 1855 def islocal(path):
1855 1856 return True
@@ -1,838 +1,797
1 1 $ cat >> $HGRCPATH << EOF
2 2 > [phases]
3 3 > # public changeset are not obsolete
4 4 > publish=false
5 5 > [ui]
6 6 > logtemplate="{rev}:{node|short} ({phase}) [{tags} {bookmarks}] {desc|firstline}\n"
7 7 > EOF
8 8 $ mkcommit() {
9 9 > echo "$1" > "$1"
10 10 > hg add "$1"
11 11 > hg ci -m "add $1"
12 12 > }
13 13 $ getid() {
14 14 > hg log -T "{node}\n" --hidden -r "desc('$1')"
15 15 > }
16 16
17 17 $ cat > debugkeys.py <<EOF
18 18 > def reposetup(ui, repo):
19 19 > class debugkeysrepo(repo.__class__):
20 20 > def listkeys(self, namespace):
21 21 > ui.write('listkeys %s\n' % (namespace,))
22 22 > return super(debugkeysrepo, self).listkeys(namespace)
23 23 >
24 24 > if repo.local():
25 25 > repo.__class__ = debugkeysrepo
26 26 > EOF
27 27
28 28 $ hg init tmpa
29 29 $ cd tmpa
30 30 $ mkcommit kill_me
31 31
32 32 Checking that the feature is properly disabled
33 33
34 34 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
35 35 abort: creating obsolete markers is not enabled on this repo
36 36 [255]
37 37
38 38 Enabling it
39 39
40 40 $ cat >> $HGRCPATH << EOF
41 41 > [experimental]
42 42 > evolution=createmarkers,exchange
43 43 > EOF
44 44
45 45 Killing a single changeset without replacement
46 46
47 47 $ hg debugobsolete 0
48 48 abort: changeset references must be full hexadecimal node identifiers
49 49 [255]
50 50 $ hg debugobsolete '00'
51 51 abort: changeset references must be full hexadecimal node identifiers
52 52 [255]
53 53 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
54 54 $ hg debugobsolete
55 55 97b7c2d76b1845ed3eb988cd612611e72406cef0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'babar'}
56 56
57 57 (test that mercurial is not confused)
58 58
59 59 $ hg up null --quiet # having 0 as parent prevents it to be hidden
60 60 $ hg tip
61 61 -1:000000000000 (public) [tip ]
62 62 $ hg up --hidden tip --quiet
63 63
64 64 Killing a single changeset with itself should fail
65 65 (simple local safeguard)
66 66
67 67 $ hg debugobsolete `getid kill_me` `getid kill_me`
68 68 abort: bad obsmarker input: in-marker cycle with 97b7c2d76b1845ed3eb988cd612611e72406cef0
69 69 [255]
70 70
71 71 $ cd ..
72 72
73 73 Killing a single changeset with replacement
74 74 (and testing the format option)
75 75
76 76 $ hg init tmpb
77 77 $ cd tmpb
78 78 $ mkcommit a
79 79 $ mkcommit b
80 80 $ mkcommit original_c
81 81 $ hg up "desc('b')"
82 82 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
83 83 $ mkcommit new_c
84 84 created new head
85 85 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
86 86 $ hg debugobsolete --config format.obsstore-version=0 --flag 12 `getid original_c` `getid new_c` -d '121 120'
87 87 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
88 88 2:245bde4270cd add original_c
89 89 $ hg debugrevlog -cd
90 90 # rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
91 91 0 -1 -1 0 59 0 0 0 0 58 58 0 1 0
92 92 1 0 -1 59 118 59 59 0 0 58 116 0 1 0
93 93 2 1 -1 118 193 118 118 59 0 76 192 0 1 0
94 94 3 1 -1 193 260 193 193 59 0 66 258 0 2 0
95 95 $ hg debugobsolete
96 96 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
97 97
98 98 (check for version number of the obsstore)
99 99
100 100 $ dd bs=1 count=1 if=.hg/store/obsstore 2>/dev/null
101 101 \x00 (no-eol) (esc)
102 102
103 103 do it again (it read the obsstore before adding new changeset)
104 104
105 105 $ hg up '.^'
106 106 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
107 107 $ mkcommit new_2_c
108 108 created new head
109 109 $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
110 110 $ hg debugobsolete
111 111 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
112 112 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
113 113
114 114 Register two markers with a missing node
115 115
116 116 $ hg up '.^'
117 117 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
118 118 $ mkcommit new_3_c
119 119 created new head
120 120 $ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
121 121 $ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
122 122 $ hg debugobsolete
123 123 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
124 124 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
125 125 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
126 126 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
127 127
128 128 Refuse pathological nullid successors
129 129 $ hg debugobsolete -d '9001 0' 1337133713371337133713371337133713371337 0000000000000000000000000000000000000000
130 130 transaction abort!
131 131 rollback completed
132 132 abort: bad obsolescence marker detected: invalid successors nullid
133 133 [255]
134 134
135 135 Check that graphlog detect that a changeset is obsolete:
136 136
137 137 $ hg log -G
138 138 @ 5:5601fb93a350 (draft) [tip ] add new_3_c
139 139 |
140 140 o 1:7c3bad9141dc (draft) [ ] add b
141 141 |
142 142 o 0:1f0dee641bb7 (draft) [ ] add a
143 143
144 144
145 145 check that heads does not report them
146 146
147 147 $ hg heads
148 148 5:5601fb93a350 (draft) [tip ] add new_3_c
149 149 $ hg heads --hidden
150 150 5:5601fb93a350 (draft) [tip ] add new_3_c
151 151 4:ca819180edb9 (draft) [ ] add new_2_c
152 152 3:cdbce2fbb163 (draft) [ ] add new_c
153 153 2:245bde4270cd (draft) [ ] add original_c
154 154
155 155
156 156 check that summary does not report them
157 157
158 158 $ hg init ../sink
159 159 $ echo '[paths]' >> .hg/hgrc
160 160 $ echo 'default=../sink' >> .hg/hgrc
161 161 $ hg summary --remote
162 162 parent: 5:5601fb93a350 tip
163 163 add new_3_c
164 164 branch: default
165 165 commit: (clean)
166 166 update: (current)
167 167 remote: 3 outgoing
168 168
169 169 $ hg summary --remote --hidden
170 170 parent: 5:5601fb93a350 tip
171 171 add new_3_c
172 172 branch: default
173 173 commit: (clean)
174 174 update: 3 new changesets, 4 branch heads (merge)
175 175 remote: 3 outgoing
176 176
177 177 check that various commands work well with filtering
178 178
179 179 $ hg tip
180 180 5:5601fb93a350 (draft) [tip ] add new_3_c
181 181 $ hg log -r 6
182 182 abort: unknown revision '6'!
183 183 [255]
184 184 $ hg log -r 4
185 185 abort: hidden revision '4'!
186 186 (use --hidden to access hidden revisions)
187 187 [255]
188 188 $ hg debugrevspec 'rev(6)'
189 189 $ hg debugrevspec 'rev(4)'
190 190 $ hg debugrevspec 'null'
191 191 -1
192 192
193 193 Check that public changeset are not accounted as obsolete:
194 194
195 195 $ hg --hidden phase --public 2
196 196 $ hg log -G
197 197 @ 5:5601fb93a350 (draft) [tip ] add new_3_c
198 198 |
199 199 | o 2:245bde4270cd (public) [ ] add original_c
200 200 |/
201 201 o 1:7c3bad9141dc (public) [ ] add b
202 202 |
203 203 o 0:1f0dee641bb7 (public) [ ] add a
204 204
205 205
206 206 And that bumped changeset are detected
207 207 --------------------------------------
208 208
209 209 If we didn't filtered obsolete changesets out, 3 and 4 would show up too. Also
210 210 note that the bumped changeset (5:5601fb93a350) is not a direct successor of
211 211 the public changeset
212 212
213 213 $ hg log --hidden -r 'bumped()'
214 214 5:5601fb93a350 (draft) [tip ] add new_3_c
215 215
216 216 And that we can't push bumped changeset
217 217
218 218 $ hg push ../tmpa -r 0 --force #(make repo related)
219 219 pushing to ../tmpa
220 220 searching for changes
221 221 warning: repository is unrelated
222 222 adding changesets
223 223 adding manifests
224 224 adding file changes
225 225 added 1 changesets with 1 changes to 1 files (+1 heads)
226 226 $ hg push ../tmpa
227 227 pushing to ../tmpa
228 228 searching for changes
229 229 abort: push includes bumped changeset: 5601fb93a350!
230 230 [255]
231 231
232 232 Fixing "bumped" situation
233 233 We need to create a clone of 5 and add a special marker with a flag
234 234
235 235 $ hg up '5^'
236 236 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
237 237 $ hg revert -ar 5
238 238 adding new_3_c
239 239 $ hg ci -m 'add n3w_3_c'
240 240 created new head
241 241 $ hg debugobsolete -d '1338 0' --flags 1 `getid new_3_c` `getid n3w_3_c`
242 242 $ hg log -r 'bumped()'
243 243 $ hg log -G
244 244 @ 6:6f9641995072 (draft) [tip ] add n3w_3_c
245 245 |
246 246 | o 2:245bde4270cd (public) [ ] add original_c
247 247 |/
248 248 o 1:7c3bad9141dc (public) [ ] add b
249 249 |
250 250 o 0:1f0dee641bb7 (public) [ ] add a
251 251
252 252
253 253
254 254
255 255 $ cd ..
256 256
257 257 Exchange Test
258 258 ============================
259 259
260 260 Destination repo does not have any data
261 261 ---------------------------------------
262 262
263 263 Simple incoming test
264 264
265 265 $ hg init tmpc
266 266 $ cd tmpc
267 267 $ hg incoming ../tmpb
268 268 comparing with ../tmpb
269 269 0:1f0dee641bb7 (public) [ ] add a
270 270 1:7c3bad9141dc (public) [ ] add b
271 271 2:245bde4270cd (public) [ ] add original_c
272 272 6:6f9641995072 (draft) [tip ] add n3w_3_c
273 273
274 274 Try to pull markers
275 275 (extinct changeset are excluded but marker are pushed)
276 276
277 277 $ hg pull ../tmpb
278 278 pulling from ../tmpb
279 279 requesting all changes
280 280 adding changesets
281 281 adding manifests
282 282 adding file changes
283 283 added 4 changesets with 4 changes to 4 files (+1 heads)
284 284 (run 'hg heads' to see heads, 'hg merge' to merge)
285 285 $ hg debugobsolete
286 286 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
287 287 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
288 288 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
289 289 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
290 290 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
291 291
292 292 Rollback//Transaction support
293 293
294 294 $ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
295 295 $ hg debugobsolete
296 296 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
297 297 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
298 298 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
299 299 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
300 300 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
301 301 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 (Thu Jan 01 00:22:20 1970 +0000) {'user': 'test'}
302 302 $ hg rollback -n
303 303 repository tip rolled back to revision 3 (undo debugobsolete)
304 304 $ hg rollback
305 305 repository tip rolled back to revision 3 (undo debugobsolete)
306 306 $ hg debugobsolete
307 307 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
308 308 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
309 309 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
310 310 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
311 311 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
312 312
313 313 $ cd ..
314 314
315 315 Try to push markers
316 316
317 317 $ hg init tmpd
318 318 $ hg -R tmpb push tmpd
319 319 pushing to tmpd
320 320 searching for changes
321 321 adding changesets
322 322 adding manifests
323 323 adding file changes
324 324 added 4 changesets with 4 changes to 4 files (+1 heads)
325 325 $ hg -R tmpd debugobsolete | sort
326 326 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
327 327 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
328 328 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
329 329 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
330 330 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
331 331
332 332 Check obsolete keys are exchanged only if source has an obsolete store
333 333
334 334 $ hg init empty
335 335 $ hg --config extensions.debugkeys=debugkeys.py -R empty push tmpd
336 336 pushing to tmpd
337 337 listkeys phases
338 338 listkeys bookmarks
339 339 no changes found
340 340 listkeys phases
341 341 [1]
342 342
343 343 clone support
344 344 (markers are copied and extinct changesets are included to allow hardlinks)
345 345
346 346 $ hg clone tmpb clone-dest
347 347 updating to branch default
348 348 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
349 349 $ hg -R clone-dest log -G --hidden
350 350 @ 6:6f9641995072 (draft) [tip ] add n3w_3_c
351 351 |
352 352 | x 5:5601fb93a350 (draft) [ ] add new_3_c
353 353 |/
354 354 | x 4:ca819180edb9 (draft) [ ] add new_2_c
355 355 |/
356 356 | x 3:cdbce2fbb163 (draft) [ ] add new_c
357 357 |/
358 358 | o 2:245bde4270cd (public) [ ] add original_c
359 359 |/
360 360 o 1:7c3bad9141dc (public) [ ] add b
361 361 |
362 362 o 0:1f0dee641bb7 (public) [ ] add a
363 363
364 364 $ hg -R clone-dest debugobsolete
365 365 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
366 366 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
367 367 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
368 368 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
369 369 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
370 370
371 371
372 372 Destination repo have existing data
373 373 ---------------------------------------
374 374
375 375 On pull
376 376
377 377 $ hg init tmpe
378 378 $ cd tmpe
379 379 $ hg debugobsolete -d '1339 0' 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00
380 380 $ hg pull ../tmpb
381 381 pulling from ../tmpb
382 382 requesting all changes
383 383 adding changesets
384 384 adding manifests
385 385 adding file changes
386 386 added 4 changesets with 4 changes to 4 files (+1 heads)
387 387 (run 'hg heads' to see heads, 'hg merge' to merge)
388 388 $ hg debugobsolete
389 389 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
390 390 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
391 391 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
392 392 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
393 393 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
394 394 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
395 395
396 396
397 397 On push
398 398
399 399 $ hg push ../tmpc
400 400 pushing to ../tmpc
401 401 searching for changes
402 402 no changes found
403 403 [1]
404 404 $ hg -R ../tmpc debugobsolete
405 405 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
406 406 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
407 407 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
408 408 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
409 409 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
410 410 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
411 411
412 412 detect outgoing obsolete and unstable
413 413 ---------------------------------------
414 414
415 415
416 416 $ hg log -G
417 417 o 3:6f9641995072 (draft) [tip ] add n3w_3_c
418 418 |
419 419 | o 2:245bde4270cd (public) [ ] add original_c
420 420 |/
421 421 o 1:7c3bad9141dc (public) [ ] add b
422 422 |
423 423 o 0:1f0dee641bb7 (public) [ ] add a
424 424
425 425 $ hg up 'desc("n3w_3_c")'
426 426 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
427 427 $ mkcommit original_d
428 428 $ mkcommit original_e
429 429 $ hg debugobsolete --record-parents `getid original_d` -d '0 0'
430 430 $ hg debugobsolete | grep `getid original_d`
431 431 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
432 432 $ hg log -r 'obsolete()'
433 433 4:94b33453f93b (draft) [ ] add original_d
434 434 $ hg log -G -r '::unstable()'
435 435 @ 5:cda648ca50f5 (draft) [tip ] add original_e
436 436 |
437 437 x 4:94b33453f93b (draft) [ ] add original_d
438 438 |
439 439 o 3:6f9641995072 (draft) [ ] add n3w_3_c
440 440 |
441 441 o 1:7c3bad9141dc (public) [ ] add b
442 442 |
443 443 o 0:1f0dee641bb7 (public) [ ] add a
444 444
445 445
446 446 refuse to push obsolete changeset
447 447
448 448 $ hg push ../tmpc/ -r 'desc("original_d")'
449 449 pushing to ../tmpc/
450 450 searching for changes
451 451 abort: push includes obsolete changeset: 94b33453f93b!
452 452 [255]
453 453
454 454 refuse to push unstable changeset
455 455
456 456 $ hg push ../tmpc/
457 457 pushing to ../tmpc/
458 458 searching for changes
459 459 abort: push includes unstable changeset: cda648ca50f5!
460 460 [255]
461 461
462 462 Test that extinct changeset are properly detected
463 463
464 464 $ hg log -r 'extinct()'
465 465
466 466 Don't try to push extinct changeset
467 467
468 468 $ hg init ../tmpf
469 469 $ hg out ../tmpf
470 470 comparing with ../tmpf
471 471 searching for changes
472 472 0:1f0dee641bb7 (public) [ ] add a
473 473 1:7c3bad9141dc (public) [ ] add b
474 474 2:245bde4270cd (public) [ ] add original_c
475 475 3:6f9641995072 (draft) [ ] add n3w_3_c
476 476 4:94b33453f93b (draft) [ ] add original_d
477 477 5:cda648ca50f5 (draft) [tip ] add original_e
478 478 $ hg push ../tmpf -f # -f because be push unstable too
479 479 pushing to ../tmpf
480 480 searching for changes
481 481 adding changesets
482 482 adding manifests
483 483 adding file changes
484 484 added 6 changesets with 6 changes to 6 files (+1 heads)
485 485
486 486 no warning displayed
487 487
488 488 $ hg push ../tmpf
489 489 pushing to ../tmpf
490 490 searching for changes
491 491 no changes found
492 492 [1]
493 493
494 494 Do not warn about new head when the new head is a successors of a remote one
495 495
496 496 $ hg log -G
497 497 @ 5:cda648ca50f5 (draft) [tip ] add original_e
498 498 |
499 499 x 4:94b33453f93b (draft) [ ] add original_d
500 500 |
501 501 o 3:6f9641995072 (draft) [ ] add n3w_3_c
502 502 |
503 503 | o 2:245bde4270cd (public) [ ] add original_c
504 504 |/
505 505 o 1:7c3bad9141dc (public) [ ] add b
506 506 |
507 507 o 0:1f0dee641bb7 (public) [ ] add a
508 508
509 509 $ hg up -q 'desc(n3w_3_c)'
510 510 $ mkcommit obsolete_e
511 511 created new head
512 512 $ hg debugobsolete `getid 'original_e'` `getid 'obsolete_e'`
513 513 $ hg outgoing ../tmpf # parasite hg outgoing testin
514 514 comparing with ../tmpf
515 515 searching for changes
516 516 6:3de5eca88c00 (draft) [tip ] add obsolete_e
517 517 $ hg push ../tmpf
518 518 pushing to ../tmpf
519 519 searching for changes
520 520 adding changesets
521 521 adding manifests
522 522 adding file changes
523 523 added 1 changesets with 1 changes to 1 files (+1 heads)
524 524
525 525 test relevance computation
526 526 ---------------------------------------
527 527
528 528 Checking simple case of "marker relevance".
529 529
530 530
531 531 Reminder of the repo situation
532 532
533 533 $ hg log --hidden --graph
534 534 @ 6:3de5eca88c00 (draft) [tip ] add obsolete_e
535 535 |
536 536 | x 5:cda648ca50f5 (draft) [ ] add original_e
537 537 | |
538 538 | x 4:94b33453f93b (draft) [ ] add original_d
539 539 |/
540 540 o 3:6f9641995072 (draft) [ ] add n3w_3_c
541 541 |
542 542 | o 2:245bde4270cd (public) [ ] add original_c
543 543 |/
544 544 o 1:7c3bad9141dc (public) [ ] add b
545 545 |
546 546 o 0:1f0dee641bb7 (public) [ ] add a
547 547
548 548
549 549 List of all markers
550 550
551 551 $ hg debugobsolete
552 552 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
553 553 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
554 554 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
555 555 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
556 556 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
557 557 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
558 558 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
559 559 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
560 560
561 561 List of changesets with no chain
562 562
563 563 $ hg debugobsolete --hidden --rev ::2
564 564
565 565 List of changesets that are included on marker chain
566 566
567 567 $ hg debugobsolete --hidden --rev 6
568 568 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
569 569
570 570 List of changesets with a longer chain, (including a pruned children)
571 571
572 572 $ hg debugobsolete --hidden --rev 3
573 573 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
574 574 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
575 575 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
576 576 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
577 577 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
578 578 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
579 579 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
580 580
581 581 List of both
582 582
583 583 $ hg debugobsolete --hidden --rev 3::6
584 584 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
585 585 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
586 586 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
587 587 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
588 588 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
589 589 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
590 590 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
591 591 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
592 592
593 593 #if serve
594 594
595 595 check hgweb does not explode
596 596 ====================================
597 597
598 598 $ hg unbundle $TESTDIR/bundles/hgweb+obs.hg
599 599 adding changesets
600 600 adding manifests
601 601 adding file changes
602 602 added 62 changesets with 63 changes to 9 files (+60 heads)
603 603 (run 'hg heads .' to see heads, 'hg merge' to merge)
604 604 $ for node in `hg log -r 'desc(babar_)' --template '{node}\n'`;
605 605 > do
606 606 > hg debugobsolete $node
607 607 > done
608 608 $ hg up tip
609 609 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
610 610
611 611 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
612 612 $ cat hg.pid >> $DAEMON_PIDS
613 613
614 614 check changelog view
615 615
616 616 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'shortlog/'
617 617 200 Script output follows
618 618
619 619 check graph view
620 620
621 621 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'graph'
622 622 200 Script output follows
623 623
624 624 check filelog view
625 625
626 626 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'log/'`hg log -r . -T "{node}"`/'babar'
627 627 200 Script output follows
628 628
629 629 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/68'
630 630 200 Script output follows
631 631 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/67'
632 632 404 Not Found
633 633 [1]
634 634
635 635 check that the web.view config option is honored:
636 636
637 637 $ "$TESTDIR/killdaemons.py" hg.pid
638 638 $ cat >> .hg/hgrc << EOF
639 639 > [web]
640 640 > view=all
641 641 > EOF
642 642 $ wait
643 643 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
644 644 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/67'
645 645 200 Script output follows
646 646 $ "$TESTDIR/killdaemons.py" hg.pid
647 647
648 648 Checking _enable=False warning if obsolete marker exists
649 649
650 650 $ echo '[experimental]' >> $HGRCPATH
651 651 $ echo "evolution=" >> $HGRCPATH
652 652 $ hg log -r tip
653 653 obsolete feature not enabled but 68 markers found!
654 654 68:c15e9edfca13 (draft) [tip ] add celestine
655 655
656 656 reenable for later test
657 657
658 658 $ echo '[experimental]' >> $HGRCPATH
659 659 $ echo "evolution=createmarkers,exchange" >> $HGRCPATH
660 660
661 661 #endif
662 662
663 663 Test incoming/outgoing with changesets obsoleted remotely, known locally
664 664 ===============================================================================
665 665
666 666 This tests issue 3805
667 667
668 668 $ hg init repo-issue3805
669 669 $ cd repo-issue3805
670 670 $ echo "foo" > foo
671 671 $ hg ci -Am "A"
672 672 adding foo
673 673 $ hg clone . ../other-issue3805
674 674 updating to branch default
675 675 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
676 676 $ echo "bar" >> foo
677 677 $ hg ci --amend
678 678 $ cd ../other-issue3805
679 679 $ hg log -G
680 680 @ 0:193e9254ce7e (draft) [tip ] A
681 681
682 682 $ hg log -G -R ../repo-issue3805
683 683 @ 2:3816541e5485 (draft) [tip ] A
684 684
685 685 $ hg incoming
686 686 comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
687 687 searching for changes
688 688 2:3816541e5485 (draft) [tip ] A
689 689 $ hg incoming --bundle ../issue3805.hg
690 690 comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
691 691 searching for changes
692 692 2:3816541e5485 (draft) [tip ] A
693 693 $ hg outgoing
694 694 comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
695 695 searching for changes
696 696 no changes found
697 697 [1]
698 698
699 699 #if serve
700 700
701 701 $ hg serve -R ../repo-issue3805 -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
702 702 $ cat hg.pid >> $DAEMON_PIDS
703 703
704 704 $ hg incoming http://localhost:$HGPORT
705 705 comparing with http://localhost:$HGPORT/
706 706 searching for changes
707 707 1:3816541e5485 (draft) [tip ] A
708 708 $ hg outgoing http://localhost:$HGPORT
709 709 comparing with http://localhost:$HGPORT/
710 710 searching for changes
711 711 no changes found
712 712 [1]
713 713
714 714 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
715 715
716 716 #endif
717 717
718 718 This tests issue 3814
719 719
720 720 (nothing to push but locally hidden changeset)
721 721
722 722 $ cd ..
723 723 $ hg init repo-issue3814
724 724 $ cd repo-issue3805
725 725 $ hg push -r 3816541e5485 ../repo-issue3814
726 726 pushing to ../repo-issue3814
727 727 searching for changes
728 728 adding changesets
729 729 adding manifests
730 730 adding file changes
731 731 added 1 changesets with 1 changes to 1 files
732 732 $ hg out ../repo-issue3814
733 733 comparing with ../repo-issue3814
734 734 searching for changes
735 735 no changes found
736 736 [1]
737 737
738 738 Test that a local tag blocks a changeset from being hidden
739 739
740 740 $ hg tag -l visible -r 0 --hidden
741 741 $ hg log -G
742 742 @ 2:3816541e5485 (draft) [tip ] A
743 743
744 744 x 0:193e9254ce7e (draft) [visible ] A
745 745
746 746 Test that removing a local tag does not cause some commands to fail
747 747
748 748 $ hg tag -l -r tip tiptag
749 749 $ hg tags
750 750 tiptag 2:3816541e5485
751 751 tip 2:3816541e5485
752 752 visible 0:193e9254ce7e
753 753 $ hg --config extensions.strip= strip -r tip --no-backup
754 754 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
755 755 $ hg tags
756 756 visible 0:193e9254ce7e
757 757 tip 0:193e9254ce7e
758 758
759 759 #if serve
760 760
761 761 Test issue 4506
762 762
763 763 $ cd ..
764 764 $ hg init repo-issue4506
765 765 $ cd repo-issue4506
766 766 $ echo "0" > foo
767 767 $ hg add foo
768 768 $ hg ci -m "content-0"
769 769
770 770 $ hg up null
771 771 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
772 772 $ echo "1" > bar
773 773 $ hg add bar
774 774 $ hg ci -m "content-1"
775 775 created new head
776 776 $ hg up 0
777 777 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
778 778 $ hg graft 1
779 779 grafting 1:1c9eddb02162 "content-1" (tip)
780 780
781 781 $ hg debugobsolete `hg log -r1 -T'{node}'` `hg log -r2 -T'{node}'`
782 782
783 783 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
784 784 $ cat hg.pid >> $DAEMON_PIDS
785 785
786 786 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/1'
787 787 404 Not Found
788 788 [1]
789 789 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'file/tip/bar'
790 790 200 Script output follows
791 791 $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'annotate/tip/bar'
792 792 200 Script output follows
793 793
794 794 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
795 795
796 796 #endif
797 797
798 $ hg init a
799 $ cd a
800 $ touch foo
801 $ hg add foo
802 $ hg ci -mfoo
803 $ touch bar
804 $ hg add bar
805 $ hg ci -mbar
806 $ hg up 0
807 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
808 $ touch quux
809 $ hg add quux
810 $ hg ci -m quux
811 created new head
812 $ hg up 1
813 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
814 $ hg tag 1.0
815
816 $ hg up 2
817 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
818 $ hg log -G
819 o 3:bc47fc7e1c1d (draft) [tip ] Added tag 1.0 for changeset 50c889141114
820 |
821 | @ 2:3d7f255a0081 (draft) [ ] quux
822 | |
823 o | 1:50c889141114 (draft) [1.0 ] bar
824 |/
825 o 0:1f7b0de80e11 (draft) [ ] foo
826
827 $ hg debugobsolete `getid bar`
828 $ hg debugobsolete `getid 1.0`
829 $ hg tag 1.0
830 $ hg log -G
831 @ 4:f9f2ab71ffd5 (draft) [tip ] Added tag 1.0 for changeset 3d7f255a0081
832 |
833 o 2:3d7f255a0081 (draft) [1.0 ] quux
834 |
835 o 0:1f7b0de80e11 (draft) [ ] foo
836
837 $ cat .hgtags
838 3d7f255a008103380aeb2a7d581fe257f40969e7 1.0
General Comments 0
You need to be logged in to leave comments. Login now