##// END OF EJS Templates
localrepo: make journal.dirstate contain in-memory changes before transaction...
FUJIWARA Katsunori -
r25878:800e090e stable
parent child Browse files
Show More
@@ -1,1949 +1,1952 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, wdirrev, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect, random
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
class repofilecache(filecache):
    """A filecache that always operates on the unfiltered repository.

    All filecache usage on a repo is for logic that must ignore repoview
    filtering, so every descriptor operation is redirected to
    ``repo.unfiltered()`` before delegating to the base class.
    """

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__get__(unfi, type)

    def __set__(self, repo, value):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__set__(unfi, value)

    def __delete__(self, repo):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__delete__(unfi)
35 35
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # Watched files live under .hg/store, so resolve paths with
        # sjoin() rather than the plain .hg/ join().
        return obj.sjoin(fname)
40 40
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # Accessed on the unfiltered repo itself: let propertycache
            # compute and cache the value on this instance.
            return super(unfilteredpropertycache, self).__get__(unfi)
        # Accessed through a filtered view: delegate to the unfiltered
        # repo's attribute so the value is computed/cached only there.
        return getattr(unfi, self.name)
49 49
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # Store directly into the instance dict via object.__setattr__ so
        # any __setattr__ override on the repo class is bypassed.
        object.__setattr__(obj, self.name, value)
55 55
56 56
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
60 60
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # Swap the (possibly filtered) repo for its unfiltered twin before
        # invoking the wrapped method.
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
66 66
# Capabilities advertised by modern local peers; legacy peers additionally
# support the pre-getbundle 'changegroupsubset' protocol.
moderncaps = set(['lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'])
legacycaps = moderncaps | set(['changegroupsubset'])
70 70
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # Peers always talk to the 'served' filtered view of the repo, so
        # filtered changesets do not leak through the peer interface.
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    # The methods below simply delegate to the underlying repository.

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            # Another client pushed concurrently; surface it as a push failure.
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
173 173
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # Advertise the legacy capability set (adds 'changegroupsubset').
        localpeer.__init__(self, repo, caps=legacycaps)

    # Pre-getbundle wire protocol methods, delegated to the repository.

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 192
class localrepository(object):

    # On-disk formats this class knows how to write.
    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    # Everything this class can read: writable formats plus store-layout
    # features.
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # Requirements forwarded as options to the store opener
    # (see _applyopenerreqs).
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # Name of the repoview filter in effect; None means unfiltered.
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        # Requirements every newly created repository starts with.
        return ['revlogv1']
208 208
    def __init__(self, baseui, path=None, create=False):
        """Open (or, when ``create`` is true, create) the repository at
        ``path``.

        Raises error.RepoError when the repository is missing (and create
        is False) or already exists (and create is True).
        """
        self.requirements = set()
        # wvfs: working directory; vfs: .hg/; both aliased under their
        # legacy 'opener' names below.
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            # Load the per-repository config and the extensions it enables;
            # a missing .hg/hgrc is fine.
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                # Build a fresh .hg/ layout driven by format.* config.
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                self.requirements.update(self._baserequirements(create))
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    self.requirements.add("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        self.requirements.add("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            self.requirements.add('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                # experimental config: format.generaldelta
                if self.ui.configbool('format', 'generaldelta', False):
                    self.requirements.add("generaldelta")
                if self.ui.configbool('experimental', 'treemanifest', False):
                    self.requirements.add("treemanifest")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    self.requirements.add("manifestv2")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # Existing repo: read .hg/requires; a missing file means an
            # old-style repo with no extra requirements.
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise
        # Resolve share support: .hg/sharedpath, when present, points at
        # the .hg directory whose store we actually use.
        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
327 327
    def close(self):
        # Flush write-back caches before the repository object goes away.
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        """Filter/extend the capability set advertised by peers of this repo."""
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            # URL-quote the blob so it survives the space-separated
            # capability wire format.
            caps.add('bundle2=' + urllib.quote(capsblob))
        return caps
341 341
342 342 def _applyopenerreqs(self):
343 343 self.svfs.options = dict((r, 1) for r in self.requirements
344 344 if r in self.openerreqs)
345 345 # experimental config: format.chunkcachesize
346 346 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
347 347 if chunkcachesize is not None:
348 348 self.svfs.options['chunkcachesize'] = chunkcachesize
349 349 # experimental config: format.maxchainlen
350 350 maxchainlen = self.ui.configint('format', 'maxchainlen')
351 351 if maxchainlen is not None:
352 352 self.svfs.options['maxchainlen'] = maxchainlen
353 353 # experimental config: format.manifestcachesize
354 354 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
355 355 if manifestcachesize is not None:
356 356 self.svfs.options['manifestcachesize'] = manifestcachesize
357 357
    def _writerequirements(self):
        # Persist self.requirements to .hg/requires.
        scmutil.writerequires(self.vfs, self.requirements)
360 360
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        # self.root is absolute and real (set from wvfs with realpath=True
        # in __init__), so a simple prefix check is meaningful here.
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # The path is the subrepo root itself: legal.
                    return True
                else:
                    # Inside a subrepo: let the subrepo decide recursively.
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # Walk up one directory and try again.
                parts.pop()
        return False
398 398
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
415 415
    @repofilecache('bookmarks')
    def _bookmarks(self):
        # Bookmark store, invalidated when .hg/bookmarks changes on disk.
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _activebookmark(self):
        # Active bookmark as read from .hg/bookmarks.current.
        return bookmarks.readactive(self)
423 423
424 424 def bookmarkheads(self, bookmark):
425 425 name = bookmark.split('@', 1)[0]
426 426 heads = []
427 427 for mark, n in self._bookmarks.iteritems():
428 428 if mark.split('@', 1)[0] == name:
429 429 heads.append(n)
430 430 return heads
431 431
    @storecache('phaseroots')
    def _phasecache(self):
        # Phase data, invalidated when .hg/store/phaseroots changes.
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # Markers exist on disk while marker creation is disabled; warn
            # so the user knows obsolescence data is present but frozen.
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store
453 453
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            # A transaction is open in the repo rooted at $HG_PENDING:
            # also read the pending (not yet committed) changelog data.
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    def dirlog(self, dir):
        # Manifest revlog for the given sub-directory (see manifest.dirlog).
        return self.manifest.dirlog(dir)
469 469
    @repofilecache('dirstate')
    def dirstate(self):
        # One-element list so the closure below can mutate the flag;
        # ensures the unknown-parent warning is printed at most once.
        warned = [0]
        def validate(node):
            # Map dirstate parents that are missing from the changelog to
            # nullid instead of crashing.
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
485 485
    def __getitem__(self, changeid):
        # None and the magic working-directory revision both address the
        # working context.
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # Slice of revision numbers; revisions hidden by the current
            # repoview filter are skipped.
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # A repository object is always truthy, even when empty.
        return True

    def __len__(self):
        # Number of revisions in the changelog.
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
536 536
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        """Record tag(s) *names* for *node*; see tag() for the public API.

        Local tags go to .hg/localtags; global tags are appended to
        .hgtags and committed, returning the new changeset node.
        """
        # NOTE(review): extra={} is a mutable default argument; safe only
        # as long as no caller/callee mutates it. Worth confirming.
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            # 'pretag' may veto the operation (throw=True).
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # Append tag lines at EOF, making sure the previous content
            # ends with a newline first.
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    # Re-tagging: record the old node first so history of
                    # the tag is preserved in the file.
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # Commit only the .hgtags change.
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
610 610
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # Refuse to auto-commit .hgtags when the working copy already
            # has uncommitted/unknown/ignored changes to it.
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
640 640
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # Derived views, filled lazily by nodetags()/tagslist().
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
663 663
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # Filtered view: the shared cache may reference revisions that
            # are hidden here, so recompute instead of using _tagscache.
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
679 679
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        # 'tip' is implicit and always present.
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
712 712
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
723 723
724 724 def tagslist(self):
725 725 '''return a list of tags ordered by revision'''
726 726 if not self._tagscache.tagslist:
727 727 l = []
728 728 for t, n in self.tags().iteritems():
729 729 l.append((self.changelog.rev(n), t, n))
730 730 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
731 731
732 732 return self._tagscache.tagslist
733 733
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # Build the reverse (node -> sorted tag names) mapping once.
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
744 744
745 745 def nodebookmarks(self, node):
746 746 marks = []
747 747 for bookmark, n in self._bookmarks.iteritems():
748 748 if n == node:
749 749 marks.append(bookmark)
750 750 return sorted(marks)
751 751
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        # One cache entry per repoview filter level.
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        # Lazily create the rev -> branch cache; it is shared across all
        # filtered views, hence the @unfilteredmethod.
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
779 779
    def lookup(self, key):
        # Resolve *key* to a changectx via __getitem__ and return its node.
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            # *key* names a branch directly.
            return key

        # Otherwise treat *key* as a changeid and return its branch;
        # prefer the remote's local repo when one is available.
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()
790 790
791 791 def known(self, nodes):
792 792 nm = self.changelog.nodemap
793 793 pc = self._phasecache
794 794 result = []
795 795 for n in nodes:
796 796 r = nm.get(n)
797 797 resp = not (r is None or pc.phase(self, r) >= phases.secret)
798 798 result.append(resp)
799 799 return result
800 800
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        # sharedpath differs from path only when .hg/sharedpath redirected
        # us to another repo's store (see __init__).
        if self.sharedpath != self.path:
            return 'store'
        return None
823 823
    def join(self, f, *insidef):
        # Path of f under the .hg/ directory.
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        # Path of f under the working directory root.
        return self.wvfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        """Return the filelog for tracked file *f* (one leading '/' is
        tolerated and stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
841 841
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents, adjusting copy records.

        Must run inside begin/endparentchange so the dirstate knows the
        parent change is intentional.
        """
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # Dropping the second parent: discard copy records whose files
            # are unknown in the remaining parent.
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
858 858
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        # Open file f in the working directory.
        return self.wvfs(f, mode)

    def _link(self, f):
        # True when working-directory file f is a symlink.
        return self.wvfs.islink(f)
875 875
    def _loadfilter(self, filter):
        """Load and cache the (matcher, fn, params) triples for the given
        filter config section ('encode' or 'decode')."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables any earlier filter for this pattern.
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # A command starting with a registered data-filter name is
                # dispatched to that Python filter instead of a shell pipe.
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # Fall back to piping through the external command.
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    # Bind oldfn now; a plain reference would late-bind.
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
899 899
900 900 def _filter(self, filterpats, filename, data):
901 901 for mf, fn, cmd in filterpats:
902 902 if mf(filename):
903 903 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
904 904 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
905 905 break
906 906
907 907 return data
908 908
@unfilteredpropertycache
def _encodefilterpats(self):
    """Loaded [encode] filter patterns, cached on the unfiltered repo."""
    pats = self._loadfilter('encode')
    return pats
912 912
@unfilteredpropertycache
def _decodefilterpats(self):
    """Loaded [decode] filter patterns, cached on the unfiltered repo."""
    pats = self._loadfilter('decode')
    return pats
916 916
def adddatafilter(self, name, filter):
    """Register an in-process data filter under ``name``
    (consulted by _loadfilter when resolving filter commands)."""
    registry = self._datafilters
    registry[name] = filter
919 919
def wread(self, filename):
    """Read ``filename`` from the working directory and apply the
    configured encode filters.

    For a symlink, the link target is returned as the data."""
    if self._link(filename):
        data = self.wvfs.readlink(filename)
    else:
        data = self.wvfs.read(filename)
    return self._filter(self._encodefilterpats, filename, data)
926 926
def wwrite(self, filename, data, flags):
    """write ``data`` into ``filename`` in the working directory

    ``data`` is run through the decode filters first.  ``flags`` may
    contain 'l' (write a symlink whose target is the data) and/or
    'x' (mark the file executable).

    This returns length of written (maybe decoded) data.
    """
    data = self._filter(self._decodefilterpats, filename, data)
    if 'l' in flags:
        self.wvfs.symlink(data, filename)
    else:
        self.wvfs.write(filename, data)
        if 'x' in flags:
            self.wvfs.setflags(filename, False, True)
    return len(data)
940 940
def wwritedata(self, filename, data):
    """Return ``data`` run through the decode filters for ``filename``,
    without writing anything to disk."""
    decoded = self._filter(self._decodefilterpats, filename, data)
    return decoded
943 943
def currenttransaction(self):
    """return the current transaction or None if non exists"""
    ref = self._transref
    # _transref is a weakref; it may be unset or already dead
    tr = ref() if ref else None
    if tr and tr.running():
        return tr
    return None
954 954
def transaction(self, desc, report=None):
    """Open (or nest into) a transaction named ``desc``.

    ``report`` overrides the function used to print rollback messages
    (defaults to ui.warn).  Returns the transaction object; if a
    transaction is already running, a nested one is returned instead.
    """
    if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
        l = self._lockref and self._lockref()
        if l is None or not l.held:
            self.ui.develwarn('transaction with no lock')
    tr = self.currenttransaction()
    if tr is not None:
        return tr.nest()

    # abort here if the journal already exists
    if self.svfs.exists("journal"):
        raise error.RepoError(
            _("abandoned transaction found"),
            hint=_("run 'hg recover' to clean up transaction"))

    # make journal.dirstate contain in-memory changes at this point
    # (it is copied from the on-disk dirstate by _writejournal below)
    self.dirstate.write()

    idbase = "%.40f#%f" % (random.random(), time.time())
    txnid = 'TXN:' + util.sha1(idbase).hexdigest()
    self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

    self._writejournal(desc)
    renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
    if report:
        rp = report
    else:
        rp = self.ui.warn
    vfsmap = {'plain': self.vfs} # root of .hg/
    # we must avoid cyclic reference between repo and transaction.
    reporef = weakref.ref(self)
    def validate(tr):
        """will run pre-closing hooks"""
        pending = lambda: tr.writepending() and self.root or ""
        reporef().hook('pretxnclose', throw=True, pending=pending,
                       txnname=desc, **tr.hookargs)

    tr = transaction.transaction(rp, self.svfs, vfsmap,
                                 "journal",
                                 "undo",
                                 aftertrans(renames),
                                 self.store.createmode,
                                 validator=validate)

    tr.hookargs['txnid'] = txnid
    # note: writing the fncache only during finalize mean that the file is
    # outdated when running hooks. As fncache is used for streaming clone,
    # this is not expected to break anything that happen during the hooks.
    tr.addfinalize('flush-fncache', self.store.write)
    def txnclosehook(tr2):
        """To be run if transaction is successful, will schedule a hook run
        """
        def hook():
            reporef().hook('txnclose', throw=False, txnname=desc,
                           **tr2.hookargs)
        reporef()._afterlock(hook)
    tr.addfinalize('txnclose-hook', txnclosehook)
    def txnaborthook(tr2):
        """To be run if transaction is aborted
        """
        reporef().hook('txnabort', throw=False, txnname=desc,
                       **tr2.hookargs)
    tr.addabort('txnabort-hook', txnaborthook)
    self._transref = weakref.ref(tr)
    return tr
1018 1021
1019 1022 def _journalfiles(self):
1020 1023 return ((self.svfs, 'journal'),
1021 1024 (self.vfs, 'journal.dirstate'),
1022 1025 (self.vfs, 'journal.branch'),
1023 1026 (self.vfs, 'journal.desc'),
1024 1027 (self.vfs, 'journal.bookmarks'),
1025 1028 (self.svfs, 'journal.phaseroots'))
1026 1029
def undofiles(self):
    """Return (vfs, name) pairs for the undo counterparts of the
    journal files."""
    files = []
    for vfs, name in self._journalfiles():
        files.append((vfs, undoname(name)))
    return files
1029 1032
def _writejournal(self, desc):
    """Snapshot non-store state (dirstate, branch, bookmarks,
    phaseroots) into journal.* files so rollback can restore it.

    journal.desc records the changelog length before the transaction
    plus its description ``desc``."""
    self.vfs.write("journal.dirstate",
                   self.vfs.tryread("dirstate"))
    self.vfs.write("journal.branch",
                   encoding.fromlocal(self.dirstate.branch()))
    self.vfs.write("journal.desc",
                   "%d\n%s\n" % (len(self), desc))
    self.vfs.write("journal.bookmarks",
                   self.vfs.tryread("bookmarks"))
    self.svfs.write("journal.phaseroots",
                    self.svfs.tryread("phaseroots"))
1041 1044
def recover(self):
    """Roll back an interrupted transaction (.hg/store/journal).

    Returns True if a journal was found and rolled back, False
    otherwise."""
    lock = self.lock()
    try:
        if self.svfs.exists("journal"):
            self.ui.status(_("rolling back interrupted transaction\n"))
            vfsmap = {'': self.svfs,
                      'plain': self.vfs,}
            transaction.rollback(self.svfs, vfsmap, "journal",
                                 self.ui.warn)
            # caches may reference rolled-back data; drop them
            self.invalidate()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
    finally:
        lock.release()
1058 1061
def rollback(self, dryrun=False, force=False):
    """Undo the last transaction if undo files exist.

    Returns 0 on success (delegated to _rollback), 1 when there is
    nothing to roll back."""
    wlock = lock = None
    try:
        # acquire wlock before lock to respect the lock ordering
        wlock = self.wlock()
        lock = self.lock()
        if self.svfs.exists("undo"):
            return self._rollback(dryrun, force)
        else:
            self.ui.warn(_("no rollback information available\n"))
            return 1
    finally:
        release(lock, wlock)
1071 1074
@unfilteredmethod # Until we get smarter cache management
def _rollback(self, dryrun, force):
    """Implementation of rollback(); both locks must already be held.

    Restores the store from the undo files and, when the dirstate
    parents were stripped by the rollback, the dirstate/branch too.
    ``force`` skips the safety check against rolling back a commit
    while not checked out.  Returns 0."""
    ui = self.ui
    try:
        args = self.vfs.read('undo.desc').splitlines()
        (oldlen, desc, detail) = (int(args[0]), args[1], None)
        if len(args) >= 3:
            detail = args[2]
        oldtip = oldlen - 1

        if detail and ui.verbose:
            msg = (_('repository tip rolled back to revision %s'
                     ' (undo %s: %s)\n')
                   % (oldtip, desc, detail))
        else:
            msg = (_('repository tip rolled back to revision %s'
                     ' (undo %s)\n')
                   % (oldtip, desc))
    except IOError:
        msg = _('rolling back unknown transaction\n')
        desc = None

    if not force and self['.'] != self['tip'] and desc == 'commit':
        raise util.Abort(
            _('rollback of last commit while not checked out '
              'may lose data'), hint=_('use -f to force'))

    ui.status(msg)
    if dryrun:
        return 0

    parents = self.dirstate.parents()
    self.destroying()
    vfsmap = {'plain': self.vfs, '': self.svfs}
    transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
    if self.vfs.exists('undo.bookmarks'):
        self.vfs.rename('undo.bookmarks', 'bookmarks')
    if self.svfs.exists('undo.phaseroots'):
        self.svfs.rename('undo.phaseroots', 'phaseroots')
    self.invalidate()

    # only restore dirstate/branch when the previous working directory
    # parents no longer exist in the rolled-back changelog
    parentgone = (parents[0] not in self.changelog.nodemap or
                  parents[1] not in self.changelog.nodemap)
    if parentgone:
        self.vfs.rename('undo.dirstate', 'dirstate')
        try:
            branch = self.vfs.read('undo.branch')
            self.dirstate.setbranch(encoding.tolocal(branch))
        except IOError:
            ui.warn(_('named branch could not be reset: '
                      'current branch is still \'%s\'\n')
                    % self.dirstate.branch())

        self.dirstate.invalidate()
        parents = tuple([p.rev() for p in self.parents()])
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
        ms = mergemod.mergestate(self)
        ms.reset(self['.'].node())

    # TODO: if we know which new heads may result from this rollback, pass
    # them to destroy(), which will prevent the branchhead cache from being
    # invalidated.
    self.destroyed()
    return 0
1141 1144
def invalidatecaches(self):
    """Drop in-memory caches: tags, branch heads and volatile sets."""
    # can't use delattr on proxy; drop the entry straight from __dict__
    vars(self).pop('_tagscache', None)
    self.unfiltered()._branchcaches.clear()
    self.invalidatevolatilesets()
1150 1153
def invalidatevolatilesets(self):
    """Forget caches that depend on the set of visible/obsolete
    changesets: the repoview filter cache and obsolescence caches."""
    self.filteredrevcache.clear()
    obsolete.clearobscaches(self)
1154 1157
def invalidatedirstate(self):
    '''Invalidates the dirstate, causing the next call to dirstate
    to check if it was modified since the last time it was read,
    rereading it if it has.

    This is different to dirstate.invalidate() that it doesn't always
    rereads the dirstate. Use dirstate.invalidate() if you want to
    explicitly read the dirstate again (i.e. restoring it to a previous
    known good state).'''
    if hasunfilteredcache(self, 'dirstate'):
        for k in self.dirstate._filecache:
            try:
                delattr(self.dirstate, k)
            except AttributeError:
                pass
        # drop the cached dirstate object itself so the next access
        # recreates it via the repofilecache machinery
        delattr(self.unfiltered(), 'dirstate')
1171 1174
def invalidate(self):
    """Drop cached store data (every filecache entry except dirstate)
    so it is reread from disk on next access."""
    unfiltered = self.unfiltered() # all file caches are stored unfiltered
    for k in self._filecache:
        # dirstate is invalidated separately in invalidatedirstate()
        if k == 'dirstate':
            continue

        try:
            delattr(unfiltered, k)
        except AttributeError:
            pass
    self.invalidatecaches()
    self.store.invalidatecaches()
1185 1188
def invalidateall(self):
    '''Fully invalidates both store and non-store parts, causing the
    subsequent operation to reread any outside changes.'''
    # extension should hook this to invalidate its caches
    # order matters: store caches first, then dirstate
    for invalidator in (self.invalidate, self.invalidatedirstate):
        invalidator()
1192 1195
def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
    """Acquire ``lockname`` inside ``vfs``.

    A non-blocking acquisition is tried first; if the lock is held and
    ``wait`` is true, retry with the configured ui.timeout (default
    600 seconds).  ``acquirefn`` runs once the lock is obtained and
    ``releasefn`` when it is released.  Returns the lock object."""
    try:
        l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
    except error.LockHeld as inst:
        if not wait:
            raise
        self.ui.warn(_("waiting for lock on %s held by %r\n") %
                     (desc, inst.locker))
        # default to 600 seconds timeout
        l = lockmod.lock(vfs, lockname,
                         int(self.ui.config("ui", "timeout", "600")),
                         releasefn, desc=desc)
        self.ui.warn(_("got lock after %s seconds\n") % l.delay)
    if acquirefn:
        acquirefn()
    return l
1209 1212
1210 1213 def _afterlock(self, callback):
1211 1214 """add a callback to be run when the repository is fully unlocked
1212 1215
1213 1216 The callback will be executed when the outermost lock is released
1214 1217 (with wlock being higher level than 'lock')."""
1215 1218 for ref in (self._wlockref, self._lockref):
1216 1219 l = ref and ref()
1217 1220 if l and l.held:
1218 1221 l.postrelease.append(callback)
1219 1222 break
1220 1223 else: # no lock have been found.
1221 1224 callback()
1222 1225
def lock(self, wait=True):
    '''Lock the repository store (.hg/store) and return a weak reference
    to the lock. Use this before modifying the store (e.g. committing or
    stripping). If you are opening a transaction, get a lock as well.)

    If both 'lock' and 'wlock' must be acquired, ensure you always acquires
    'wlock' first to avoid a dead-lock hazard.'''
    l = self._lockref and self._lockref()
    if l is not None and l.held:
        # reentrant acquisition: bump the existing lock's counter
        l.lock()
        return l

    def unlock():
        # refresh filecache entries on release so changes made under
        # the lock are noticed (dirstate is handled by wlock's unlock)
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    l = self._lock(self.svfs, "lock", wait, unlock,
                   self.invalidate, _('repository %s') % self.origroot)
    self._lockref = weakref.ref(l)
    return l
1245 1248
def wlock(self, wait=True):
    '''Lock the non-store parts of the repository (everything under
    .hg except .hg/store) and return a weak reference to the lock.

    Use this before modifying files in .hg.

    If both 'lock' and 'wlock' must be acquired, ensure you always acquires
    'wlock' first to avoid a dead-lock hazard.'''
    l = self._wlockref and self._wlockref()
    if l is not None and l.held:
        # reentrant acquisition: bump the existing lock's counter
        l.lock()
        return l

    # We do not need to check for non-waiting lock aquisition. Such
    # acquisition would not cause dead-lock as they would just fail.
    if wait and (self.ui.configbool('devel', 'all-warnings')
                 or self.ui.configbool('devel', 'check-locks')):
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            self.ui.develwarn('"wlock" acquired after "lock"')

    def unlock():
        # on release: discard pending dirstate changes if a parent
        # change was aborted, otherwise flush them to disk
        if self.dirstate.pendingparentchange():
            self.dirstate.invalidate()
        else:
            self.dirstate.write()

        self._filecache['dirstate'].refresh()

    l = self._lock(self.vfs, "wlock", wait, unlock,
                   self.invalidatedirstate, _('working directory of %s') %
                   self.origroot)
    self._wlockref = weakref.ref(l)
    return l
1280 1283
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    """
    commit an individual file as part of a larger transaction

    Returns the filelog node for the file's new revision; files whose
    content (or copy metadata) changed are appended to ``changelist``.
    ``manifest1``/``manifest2`` are the parents' manifests.
    """

    fname = fctx.path()
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = manifest2.get(fname, nullid)
    if isinstance(fctx, context.filectx):
        # the filenode already exists (e.g. graft/amend reuse): no need
        # to add a new filelog revision
        node = fctx.filenode()
        if node in [fparent1, fparent2]:
            self.ui.debug('reusing %s filelog entry\n' % fname)
            return node

    flog = self.file(fname)
    meta = {}
    copy = fctx.renamed()
    if copy and copy[0] != fname:
        # Mark the new revision of this file as a copy of another
        # file. This copy data will effectively act as a parent
        # of this new revision. If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent. For example:
        #
        # 0 --- 1 --- 3 rev1 changes file foo
        # \ / rev2 renames foo to bar and changes it
        # \- 2 -/ rev3 should have bar with all changes and
        # should record that bar descends from
        # bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3 rev4 reverts the content change from rev2
        # \ / merging rev3 and rev4 should use bar@rev2
        # \- 2 --- 4 as the merge base
        #

        cfname = copy[0]
        crev = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2: # branch merge
            if fparent2 == nullid or crev is None: # copied on remote side
                if cfname in manifest2:
                    crev = manifest2[cfname]
                    newfparent = fparent1

        # Here, we used to search backwards through history to try to find
        # where the file copy came from if the source of a copy was not in
        # the parent directory. However, this doesn't actually make sense to
        # do (what does a copy from something not in your working copy even
        # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
        # the user that copy information was dropped, so if they didn't
        # expect this outcome it can be fixed, but this is the correct
        # behavior in this circumstance.

        if crev:
            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        else:
            self.ui.warn(_("warning: can't find ancestor for '%s' "
                           "copied from '%s'!\n") % (fname, cfname))

    elif fparent1 == nullid:
        fparent1, fparent2 = fparent2, nullid
    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
        if fparent1 in fparentancestors:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 in fparentancestors:
            fparent2 = nullid

    # is the file changed?
    text = fctx.data()
    if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
        changelist.append(fname)
        return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
    # are just the flags changed during merge?
    elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
        changelist.append(fname)

    return fparent1
1366 1369
@unfilteredmethod
def commit(self, text="", user=None, date=None, match=None, force=False,
           editor=False, extra={}):
    """Add a new revision to current repository.

    Revision information is gathered from the working directory,
    match can be used to filter the committed files. If editor is
    supplied, it is called to get a commit message.

    Returns the new changeset node, or None when there was nothing to
    commit.  NOTE: ``extra`` has a mutable default; it is only read
    here, never mutated.
    """

    # reports an explicitly-named file that cannot be committed
    def fail(f, msg):
        raise util.Abort('%s: %s' % (f, msg))

    if not match:
        match = matchmod.always(self.root, '')

    if not force:
        vdirs = []
        match.explicitdir = vdirs.append
        match.bad = fail

    wlock = self.wlock()
    try:
        wctx = self[None]
        merge = len(wctx.parents()) > 1

        if not force and merge and match.ispartial():
            raise util.Abort(_('cannot partially commit a merge '
                               '(do not specify files or patterns)'))

        status = self.status(match=match, clean=force)
        if force:
            status.modified.extend(status.clean) # mq may commit clean files

        # check subrepos
        subs = []
        commitsubs = set()
        newstate = wctx.substate.copy()
        # only manage subrepos and .hgsubstate if .hgsub is present
        if '.hgsub' in wctx:
            # we'll decide whether to track this ourselves, thanks
            for c in status.modified, status.added, status.removed:
                if '.hgsubstate' in c:
                    c.remove('.hgsubstate')

            # compare current state to last committed state
            # build new substate based on last committed state
            oldstate = wctx.p1().substate
            for s in sorted(newstate.keys()):
                if not match(s):
                    # ignore working copy, use old state if present
                    if s in oldstate:
                        newstate[s] = oldstate[s]
                        continue
                    if not force:
                        raise util.Abort(
                            _("commit with new subrepo %s excluded") % s)
                dirtyreason = wctx.sub(s).dirtyreason(True)
                if dirtyreason:
                    if not self.ui.configbool('ui', 'commitsubrepos'):
                        raise util.Abort(dirtyreason,
                            hint=_("use --subrepos for recursive commit"))
                    subs.append(s)
                    commitsubs.add(s)
                else:
                    bs = wctx.sub(s).basestate()
                    newstate[s] = (newstate[s][0], bs, newstate[s][2])
                    if oldstate.get(s, (None, None, None))[1] != bs:
                        subs.append(s)

            # check for removed subrepos
            for p in wctx.parents():
                r = [s for s in p.substate if s not in newstate]
                subs += [s for s in r if match(s)]
            if subs:
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(
                        _("can't commit subrepos without .hgsub"))
                status.modified.insert(0, '.hgsubstate')

        elif '.hgsub' in status.removed:
            # clean up .hgsubstate when .hgsub is removed
            if ('.hgsubstate' in wctx and
                '.hgsubstate' not in (status.modified + status.added +
                                      status.removed)):
                status.removed.insert(0, '.hgsubstate')

        # make sure all explicit patterns are matched
        if not force and (match.isexact() or match.prefix()):
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

        cctx = context.workingcommitctx(self, status,
                                        text, user, date, extra)

        # internal config: ui.allowemptycommit
        allowemptycommit = (wctx.branch() != wctx.p1().branch()
                            or extra.get('close') or merge or cctx.files()
                            or self.ui.configbool('ui', 'allowemptycommit'))
        if not allowemptycommit:
            # nothing to commit
            return None

        if merge and cctx.deleted():
            raise util.Abort(_("cannot commit merge with missing files"))

        ms = mergemod.mergestate(self)
        for f in status.modified:
            if f in ms and ms[f] == 'u':
                raise util.Abort(_('unresolved merge conflicts '
                                   '(see "hg help resolve")'))

        if editor:
            cctx._text = editor(self, cctx, subs)
        edited = (text != cctx._text)

        # Save commit message in case this transaction gets rolled back
        # (e.g. by a pretxncommit hook). Leave the content alone on
        # the assumption that the user will use the same editor again.
        msgfn = self.savecommitmessage(cctx._text)

        # commit subs and write new state
        if subs:
            for s in sorted(commitsubs):
                sub = wctx.sub(s)
                self.ui.status(_('committing subrepository %s\n') %
                               subrepo.subrelpath(sub))
                sr = sub.commit(cctx._text, user, date)
                newstate[s] = (newstate[s][0], sr)
            subrepo.writestate(self, newstate)

        p1, p2 = self.dirstate.parents()
        hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
        try:
            self.hook("precommit", throw=True, parent1=hookp1,
                      parent2=hookp2)
            ret = self.commitctx(cctx, True)
        except: # re-raises
            if edited:
                self.ui.write(
                    _('note: commit message saved in %s\n') % msgfn)
            raise

        # update bookmarks, dirstate and mergestate
        bookmarks.update(self, [p1, p2], ret)
        cctx.markcommitted(ret)
        ms.reset()
    finally:
        wlock.release()

    def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
        # hack for command that use a temporary commit (eg: histedit)
        # temporary commit got stripped before hook release
        if self.changelog.hasnode(ret):
            self.hook("commit", node=node, parent1=parent1,
                      parent2=parent2)
    self._afterlock(commithook)
    return ret
1540 1543
@unfilteredmethod
def commitctx(self, ctx, error=False):
    """Add a new revision to current repository.
    Revision information is passed via the context argument.

    When ``error`` is true, an IOError while reading a file to commit
    is always fatal; otherwise ENOENT is tolerated (file removed).
    Returns the new changelog node.
    """

    tr = None
    p1, p2 = ctx.p1(), ctx.p2()
    user = ctx.user()

    lock = self.lock()
    try:
        tr = self.transaction("commit")
        trp = weakref.proxy(tr)

        if ctx.files():
            m1 = p1.manifest()
            m2 = p2.manifest()
            m = m1.copy()

            # check in files
            added = []
            changed = []
            removed = list(ctx.removed())
            linkrev = len(self)
            self.ui.note(_("committing files:\n"))
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    if fctx is None:
                        removed.append(f)
                    else:
                        added.append(f)
                        m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                trp, changed)
                        m.setflag(f, fctx.flags())
                except OSError as inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError as inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

            # update manifest
            self.ui.note(_("committing manifest\n"))
            # only report files removed that existed in a parent
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m]
            for f in drop:
                del m[f]
            mn = self.manifest.add(m, trp, linkrev,
                                   p1.manifestnode(), p2.manifestnode(),
                                   added, drop)
            files = changed + removed
        else:
            # no file changes: reuse the first parent's manifest
            mn = p1.manifestnode()
            files = []

        # update changelog
        self.ui.note(_("committing changelog\n"))
        self.changelog.delayupdate(tr)
        n = self.changelog.add(mn, files, ctx.description(),
                               trp, p1.node(), p2.node(),
                               user, ctx.date(), ctx.extra().copy())
        p = lambda: tr.writepending() and self.root or ""
        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2, pending=p)
        # set the new commit is proper phase
        targetphase = subrepo.newcommitphase(self.ui, ctx)
        if targetphase:
            # retract boundary do not alter parent changeset.
            # if a parent have higher the resulting phase will
            # be compliant anyway
            #
            # if minimal phase was 0 we don't need to retract anything
            phases.retractboundary(self, tr, targetphase, [n])
        tr.close()
        branchmap.updatecache(self.filtered('served'))
        return n
    finally:
        if tr:
            tr.release()
        lock.release()
1627 1630
@unfilteredmethod
def destroying(self):
    '''Inform the repository that nodes are about to be destroyed.
    Intended for use by strip and rollback, so there's a common
    place for anything that has to be done before destroying history.

    This is mostly useful for saving state that is in memory and waiting
    to be flushed when the current lock is released. Because a call to
    destroyed is imminent, the repo will be invalidated causing those
    changes to stay in memory (waiting for the next unlock), or vanish
    completely.
    '''
    # When using the same lock to commit and strip, the phasecache is left
    # dirty after committing. Then when we strip, the repo is invalidated,
    # causing those changes to disappear.
    # (checking vars(self) avoids instantiating the cache just to flush it)
    if '_phasecache' in vars(self):
        self._phasecache.write()
1645 1648
@unfilteredmethod
def destroyed(self):
    '''Inform the repository that nodes have been destroyed.
    Intended for use by strip and rollback, so there's a common
    place for anything that has to be done after destroying history.
    '''
    # When one tries to:
    # 1) destroy nodes thus calling this method (e.g. strip)
    # 2) use phasecache somewhere (e.g. commit)
    #
    # then 2) will fail because the phasecache contains nodes that were
    # removed. We can either remove phasecache from the filecache,
    # causing it to reload next time it is accessed, or simply filter
    # the removed nodes now and write the updated cache.
    self._phasecache.filterunknown(self)
    self._phasecache.write()

    # update the 'served' branch cache to help read only server process
    # Thanks to branchcache collaboration this is done from the nearest
    # filtered subset and it is expected to be fast.
    branchmap.updatecache(self.filtered('served'))

    # Ensure the persistent tag cache is updated. Doing it now
    # means that the tag cache only has to worry about destroyed
    # heads immediately after a strip/rollback. That in turn
    # guarantees that "cachetip == currenttip" (comparing both rev
    # and node) always means no nodes have been added or destroyed.

    # XXX this is suboptimal when qrefresh'ing: we strip the current
    # head, refresh the tag cache, then immediately add a new head.
    # But I think doing it this way is necessary for the "instant
    # tag cache retrieval" case to work.
    # NOTE(review): invalidate() drops the filecaches, so the next
    # access rereads everything from disk
    self.invalidate()
1679 1682
def walk(self, match, node=None):
    '''walk recursively through the directory tree or a given
    changeset, finding all files matched by the match
    function
    '''
    ctx = self[node]
    return ctx.walk(match)
1687 1690
def status(self, node1='.', node2=None, match=None,
           ignored=False, clean=False, unknown=False,
           listsubrepos=False):
    '''a convenience method that calls node1.status(node2)'''
    ctx1 = self[node1]
    return ctx1.status(node2, match, ignored, clean, unknown,
                       listsubrepos)
1694 1697
def heads(self, start=None):
    """Return the changelog heads (optionally only those reachable from
    ``start``), sorted by revision number, newest first."""
    headnodes = self.changelog.heads(start)
    # sort the output in rev descending order
    return sorted(headnodes, key=self.changelog.rev, reverse=True)
1699 1702
def branchheads(self, branch=None, start=None, closed=False):
    '''return a (possibly filtered) list of heads for the given branch

    Heads are returned in topological order, from newest to oldest.
    If branch is None, use the dirstate branch.
    If start is not None, return only heads reachable from start.
    If closed is True, return heads that are marked as closed as well.
    '''
    if branch is None:
        branch = self[None].branch()
    branches = self.branchmap()
    if branch not in branches:
        return []
    # the cache returns heads ordered lowest to highest
    bheads = list(reversed(branches.branchheads(branch, closed=closed)))
    if start is not None:
        # filter out the heads that cannot be reached from startrev
        fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
        bheads = [h for h in bheads if h in fbheads]
    return bheads
1720 1723
1721 1724 def branches(self, nodes):
1722 1725 if not nodes:
1723 1726 nodes = [self.changelog.tip()]
1724 1727 b = []
1725 1728 for n in nodes:
1726 1729 t = n
1727 1730 while True:
1728 1731 p = self.changelog.parents(n)
1729 1732 if p[1] != nullid or p[0] == nullid:
1730 1733 b.append((t, n, p[0], p[1]))
1731 1734 break
1732 1735 n = p[0]
1733 1736 return b
1734 1737
1735 1738 def between(self, pairs):
1736 1739 r = []
1737 1740
1738 1741 for top, bottom in pairs:
1739 1742 n, l, i = top, [], 0
1740 1743 f = 1
1741 1744
1742 1745 while n != bottom and n != nullid:
1743 1746 p = self.changelog.parents(n)[0]
1744 1747 if i == f:
1745 1748 l.append(n)
1746 1749 f = f * 2
1747 1750 n = p
1748 1751 i += 1
1749 1752
1750 1753 r.append(l)
1751 1754
1752 1755 return r
1753 1756
1754 1757 def checkpush(self, pushop):
1755 1758 """Extensions can override this function if additional checks have
1756 1759 to be performed before pushing, or call it if they override push
1757 1760 command.
1758 1761 """
1759 1762 pass
1760 1763
1761 1764 @unfilteredpropertycache
1762 1765 def prepushoutgoinghooks(self):
1763 1766 """Return util.hooks consists of "(repo, remote, outgoing)"
1764 1767 functions, which are called before pushing changesets.
1765 1768 """
1766 1769 return util.hooks()
1767 1770
1768 1771 def stream_in(self, remote, remotereqs):
1769 1772 # Save remote branchmap. We will use it later
1770 1773 # to speed up branchcache creation
1771 1774 rbranchmap = None
1772 1775 if remote.capable("branchmap"):
1773 1776 rbranchmap = remote.branchmap()
1774 1777
1775 1778 fp = remote.stream_out()
1776 1779 l = fp.readline()
1777 1780 try:
1778 1781 resp = int(l)
1779 1782 except ValueError:
1780 1783 raise error.ResponseError(
1781 1784 _('unexpected response from remote server:'), l)
1782 1785 if resp == 1:
1783 1786 raise util.Abort(_('operation forbidden by server'))
1784 1787 elif resp == 2:
1785 1788 raise util.Abort(_('locking the remote repository failed'))
1786 1789 elif resp != 0:
1787 1790 raise util.Abort(_('the server sent an unknown error code'))
1788 1791
1789 1792 self.applystreamclone(remotereqs, rbranchmap, fp)
1790 1793 return len(self.heads()) + 1
1791 1794
1792 1795 def applystreamclone(self, remotereqs, remotebranchmap, fp):
1793 1796 """Apply stream clone data to this repository.
1794 1797
1795 1798 "remotereqs" is a set of requirements to handle the incoming data.
1796 1799 "remotebranchmap" is the result of a branchmap lookup on the remote. It
1797 1800 can be None.
1798 1801 "fp" is a file object containing the raw stream data, suitable for
1799 1802 feeding into exchange.consumestreamclone.
1800 1803 """
1801 1804 lock = self.lock()
1802 1805 try:
1803 1806 exchange.consumestreamclone(self, fp)
1804 1807
1805 1808 # new requirements = old non-format requirements +
1806 1809 # new format-related remote requirements
1807 1810 # requirements from the streamed-in repository
1808 1811 self.requirements = remotereqs | (
1809 1812 self.requirements - self.supportedformats)
1810 1813 self._applyopenerreqs()
1811 1814 self._writerequirements()
1812 1815
1813 1816 if remotebranchmap:
1814 1817 rbheads = []
1815 1818 closed = []
1816 1819 for bheads in remotebranchmap.itervalues():
1817 1820 rbheads.extend(bheads)
1818 1821 for h in bheads:
1819 1822 r = self.changelog.rev(h)
1820 1823 b, c = self.changelog.branchinfo(r)
1821 1824 if c:
1822 1825 closed.append(h)
1823 1826
1824 1827 if rbheads:
1825 1828 rtiprev = max((int(self.changelog.rev(node))
1826 1829 for node in rbheads))
1827 1830 cache = branchmap.branchcache(remotebranchmap,
1828 1831 self[rtiprev].node(),
1829 1832 rtiprev,
1830 1833 closednodes=closed)
1831 1834 # Try to stick it as low as possible
1832 1835 # filter above served are unlikely to be fetch from a clone
1833 1836 for candidate in ('base', 'immutable', 'served'):
1834 1837 rview = self.filtered(candidate)
1835 1838 if cache.validfor(rview):
1836 1839 self._branchcaches[candidate] = cache
1837 1840 cache.write(rview)
1838 1841 break
1839 1842 self.invalidate()
1840 1843 finally:
1841 1844 lock.release()
1842 1845
1843 1846 def clone(self, remote, heads=[], stream=None):
1844 1847 '''clone remote repository.
1845 1848
1846 1849 keyword arguments:
1847 1850 heads: list of revs to clone (forces use of pull)
1848 1851 stream: use streaming clone if possible'''
1849 1852
1850 1853 # now, all clients that can request uncompressed clones can
1851 1854 # read repo formats supported by all servers that can serve
1852 1855 # them.
1853 1856
1854 1857 # if revlog format changes, client will have to check version
1855 1858 # and format flags on "stream" capability, and use
1856 1859 # uncompressed only if compatible.
1857 1860
1858 1861 if stream is None:
1859 1862 # if the server explicitly prefers to stream (for fast LANs)
1860 1863 stream = remote.capable('stream-preferred')
1861 1864
1862 1865 if stream and not heads:
1863 1866 # 'stream' means remote revlog format is revlogv1 only
1864 1867 if remote.capable('stream'):
1865 1868 self.stream_in(remote, set(('revlogv1',)))
1866 1869 else:
1867 1870 # otherwise, 'streamreqs' contains the remote revlog format
1868 1871 streamreqs = remote.capable('streamreqs')
1869 1872 if streamreqs:
1870 1873 streamreqs = set(streamreqs.split(','))
1871 1874 # if we support it, stream in and adjust our requirements
1872 1875 if not streamreqs - self.supportedformats:
1873 1876 self.stream_in(remote, streamreqs)
1874 1877
1875 1878 # internal config: ui.quietbookmarkmove
1876 1879 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1877 1880 try:
1878 1881 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1879 1882 ret = exchange.pull(self, remote, heads).cgresult
1880 1883 finally:
1881 1884 self.ui.restoreconfig(quiet)
1882 1885 return ret
1883 1886
1884 1887 def pushkey(self, namespace, key, old, new):
1885 1888 try:
1886 1889 tr = self.currenttransaction()
1887 1890 hookargs = {}
1888 1891 if tr is not None:
1889 1892 hookargs.update(tr.hookargs)
1890 1893 pending = lambda: tr.writepending() and self.root or ""
1891 1894 hookargs['pending'] = pending
1892 1895 hookargs['namespace'] = namespace
1893 1896 hookargs['key'] = key
1894 1897 hookargs['old'] = old
1895 1898 hookargs['new'] = new
1896 1899 self.hook('prepushkey', throw=True, **hookargs)
1897 1900 except error.HookAbort as exc:
1898 1901 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1899 1902 if exc.hint:
1900 1903 self.ui.write_err(_("(%s)\n") % exc.hint)
1901 1904 return False
1902 1905 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1903 1906 ret = pushkey.push(self, namespace, key, old, new)
1904 1907 def runhook():
1905 1908 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1906 1909 ret=ret)
1907 1910 self._afterlock(runhook)
1908 1911 return ret
1909 1912
1910 1913 def listkeys(self, namespace):
1911 1914 self.hook('prelistkeys', throw=True, namespace=namespace)
1912 1915 self.ui.debug('listing keys for "%s"\n' % namespace)
1913 1916 values = pushkey.list(self, namespace)
1914 1917 self.hook('listkeys', namespace=namespace, values=values)
1915 1918 return values
1916 1919
1917 1920 def debugwireargs(self, one, two, three=None, four=None, five=None):
1918 1921 '''used to test argument passing over the wire'''
1919 1922 return "%s %s %s %s %s" % (one, two, three, four, five)
1920 1923
1921 1924 def savecommitmessage(self, text):
1922 1925 fp = self.vfs('last-message.txt', 'wb')
1923 1926 try:
1924 1927 fp.write(text)
1925 1928 finally:
1926 1929 fp.close()
1927 1930 return self.pathto(fp.name[len(self.root) + 1:])
1928 1931
1929 1932 # used to avoid circular references so destructors work
1930 1933 def aftertrans(files):
1931 1934 renamefiles = [tuple(t) for t in files]
1932 1935 def a():
1933 1936 for vfs, src, dest in renamefiles:
1934 1937 try:
1935 1938 vfs.rename(src, dest)
1936 1939 except OSError: # journal file does not yet exist
1937 1940 pass
1938 1941 return a
1939 1942
1940 1943 def undoname(fn):
1941 1944 base, name = os.path.split(fn)
1942 1945 assert name.startswith('journal')
1943 1946 return os.path.join(base, name.replace('journal', 'undo', 1))
1944 1947
1945 1948 def instance(ui, path, create):
1946 1949 return localrepository(ui, util.urllocalpath(path), create)
1947 1950
1948 1951 def islocal(path):
1949 1952 return True
@@ -1,607 +1,635 b''
1 1 $ hg init basic
2 2 $ cd basic
3 3
4 4 should complain
5 5
6 6 $ hg backout
7 7 abort: please specify a revision to backout
8 8 [255]
9 9 $ hg backout -r 0 0
10 10 abort: please specify just one revision
11 11 [255]
12 12
13 13 basic operation
14 14 (this also tests that editor is invoked if the commit message is not
15 15 specified explicitly)
16 16
17 17 $ echo a > a
18 18 $ hg commit -d '0 0' -A -m a
19 19 adding a
20 20 $ echo b >> a
21 21 $ hg commit -d '1 0' -m b
22 22
23 23 $ hg status --rev tip --rev "tip^1"
24 24 M a
25 25 $ HGEDITOR=cat hg backout -d '2 0' tip --tool=true
26 26 reverting a
27 27 Backed out changeset a820f4f40a57
28 28
29 29
30 30 HG: Enter commit message. Lines beginning with 'HG:' are removed.
31 31 HG: Leave message empty to abort commit.
32 32 HG: --
33 33 HG: user: test
34 34 HG: branch 'default'
35 35 HG: changed a
36 36 changeset 2:2929462c3dff backs out changeset 1:a820f4f40a57
37 37 $ cat a
38 38 a
39 39 $ hg summary
40 40 parent: 2:2929462c3dff tip
41 41 Backed out changeset a820f4f40a57
42 42 branch: default
43 43 commit: (clean)
44 44 update: (current)
45 45 phases: 3 draft
46 46
47 47 commit option
48 48
49 49 $ cd ..
50 50 $ hg init commit
51 51 $ cd commit
52 52
53 53 $ echo tomatoes > a
54 54 $ hg add a
55 55 $ hg commit -d '0 0' -m tomatoes
56 56
57 57 $ echo chair > b
58 58 $ hg add b
59 59 $ hg commit -d '1 0' -m chair
60 60
61 61 $ echo grapes >> a
62 62 $ hg commit -d '2 0' -m grapes
63 63
64 64 $ hg backout --commit -d '4 0' 1 --tool=:fail
65 65 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
66 66 changeset 3:1c2161e97c0a backs out changeset 1:22cb4f70d813
67 67 $ hg summary
68 68 parent: 3:1c2161e97c0a tip
69 69 Backed out changeset 22cb4f70d813
70 70 branch: default
71 71 commit: (clean)
72 72 update: (current)
73 73 phases: 4 draft
74 74
75 75 $ echo ypples > a
76 76 $ hg commit -d '5 0' -m ypples
77 77
78 78 $ hg backout --commit -d '6 0' 2 --tool=:fail
79 79 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
80 80 use 'hg resolve' to retry unresolved file merges
81 81 [1]
82 82 $ hg summary
83 83 parent: 4:ed99997b793d tip
84 84 ypples
85 85 branch: default
86 86 commit: 1 unresolved (clean)
87 87 update: (current)
88 88 phases: 5 draft
89 89
90 90 file that was removed is recreated
91 91 (this also tests that editor is not invoked if the commit message is
92 92 specified explicitly)
93 93
94 94 $ cd ..
95 95 $ hg init remove
96 96 $ cd remove
97 97
98 98 $ echo content > a
99 99 $ hg commit -d '0 0' -A -m a
100 100 adding a
101 101
102 102 $ hg rm a
103 103 $ hg commit -d '1 0' -m b
104 104
105 105 $ HGEDITOR=cat hg backout -d '2 0' tip --tool=true -m "Backed out changeset 76862dcce372"
106 106 adding a
107 107 changeset 2:de31bdc76c0d backs out changeset 1:76862dcce372
108 108 $ cat a
109 109 content
110 110 $ hg summary
111 111 parent: 2:de31bdc76c0d tip
112 112 Backed out changeset 76862dcce372
113 113 branch: default
114 114 commit: (clean)
115 115 update: (current)
116 116 phases: 3 draft
117 117
118 118 backout of backout is as if nothing happened
119 119
120 120 $ hg backout -d '3 0' --merge tip --tool=true
121 121 removing a
122 122 changeset 3:7f6d0f120113 backs out changeset 2:de31bdc76c0d
123 123 $ test -f a
124 124 [1]
125 125 $ hg summary
126 126 parent: 3:7f6d0f120113 tip
127 127 Backed out changeset de31bdc76c0d
128 128 branch: default
129 129 commit: (clean)
130 130 update: (current)
131 131 phases: 4 draft
132 132
133 Test that 'hg rollback' restores dirstate just before opening
134 transaction: in-memory dirstate changes should be written into
135 '.hg/journal.dirstate' as expected.
136
137 $ echo 'removed soon' > b
138 $ hg commit -A -d '4 0' -m 'prepare for subsequent removing'
139 adding b
140 $ echo 'newly added' > c
141 $ hg add c
142 $ hg remove b
143 $ hg commit -d '5 0' -m 'prepare for subsequent backout'
144 $ touch -t 200001010000 c
145 $ hg status -A
146 C c
147 $ hg debugstate --nodates
148 n 644 12 set c
149 $ hg backout -d '6 0' -m 'to be rollback-ed soon' -r .
150 adding b
151 removing c
152 changeset 6:4bfec048029d backs out changeset 5:fac0b729a654
153 $ hg rollback -q
154 $ hg status -A
155 A b
156 R c
157 $ hg debugstate --nodates
158 a 0 -1 unset b
159 r 0 0 set c
160
133 161 across branch
134 162
135 163 $ cd ..
136 164 $ hg init branch
137 165 $ cd branch
138 166 $ echo a > a
139 167 $ hg ci -Am0
140 168 adding a
141 169 $ echo b > b
142 170 $ hg ci -Am1
143 171 adding b
144 172 $ hg co -C 0
145 173 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
146 174 $ hg summary
147 175 parent: 0:f7b1eb17ad24
148 176 0
149 177 branch: default
150 178 commit: (clean)
151 179 update: 1 new changesets (update)
152 180 phases: 2 draft
153 181
154 182 should fail
155 183
156 184 $ hg backout 1
157 185 abort: cannot backout change that is not an ancestor
158 186 [255]
159 187 $ echo c > c
160 188 $ hg ci -Am2
161 189 adding c
162 190 created new head
163 191 $ hg summary
164 192 parent: 2:db815d6d32e6 tip
165 193 2
166 194 branch: default
167 195 commit: (clean)
168 196 update: 1 new changesets, 2 branch heads (merge)
169 197 phases: 3 draft
170 198
171 199 should fail
172 200
173 201 $ hg backout 1
174 202 abort: cannot backout change that is not an ancestor
175 203 [255]
176 204 $ hg summary
177 205 parent: 2:db815d6d32e6 tip
178 206 2
179 207 branch: default
180 208 commit: (clean)
181 209 update: 1 new changesets, 2 branch heads (merge)
182 210 phases: 3 draft
183 211
184 212 backout with merge
185 213
186 214 $ cd ..
187 215 $ hg init merge
188 216 $ cd merge
189 217
190 218 $ echo line 1 > a
191 219 $ echo line 2 >> a
192 220 $ hg commit -d '0 0' -A -m a
193 221 adding a
194 222 $ hg summary
195 223 parent: 0:59395513a13a tip
196 224 a
197 225 branch: default
198 226 commit: (clean)
199 227 update: (current)
200 228 phases: 1 draft
201 229
202 230 remove line 1
203 231
204 232 $ echo line 2 > a
205 233 $ hg commit -d '1 0' -m b
206 234
207 235 $ echo line 3 >> a
208 236 $ hg commit -d '2 0' -m c
209 237
210 238 $ hg backout --merge -d '3 0' 1 --tool=true
211 239 reverting a
212 240 created new head
213 241 changeset 3:26b8ccb9ad91 backs out changeset 1:5a50a024c182
214 242 merging with changeset 3:26b8ccb9ad91
215 243 merging a
216 244 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
217 245 (branch merge, don't forget to commit)
218 246 $ hg commit -d '4 0' -m d
219 247 $ hg summary
220 248 parent: 4:c7df5e0b9c09 tip
221 249 d
222 250 branch: default
223 251 commit: (clean)
224 252 update: (current)
225 253 phases: 5 draft
226 254
227 255 check line 1 is back
228 256
229 257 $ cat a
230 258 line 1
231 259 line 2
232 260 line 3
233 261
234 262 $ cd ..
235 263
236 264 backout should not back out subsequent changesets
237 265
238 266 $ hg init onecs
239 267 $ cd onecs
240 268 $ echo 1 > a
241 269 $ hg commit -d '0 0' -A -m a
242 270 adding a
243 271 $ echo 2 >> a
244 272 $ hg commit -d '1 0' -m b
245 273 $ echo 1 > b
246 274 $ hg commit -d '2 0' -A -m c
247 275 adding b
248 276 $ hg summary
249 277 parent: 2:882396649954 tip
250 278 c
251 279 branch: default
252 280 commit: (clean)
253 281 update: (current)
254 282 phases: 3 draft
255 283
256 284 without --merge
257 285 $ hg backout -d '3 0' 1 --tool=true
258 286 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
259 287 changeset 22bca4c721e5 backed out, don't forget to commit.
260 288 $ hg locate b
261 289 b
262 290 $ hg update -C tip
263 291 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
264 292 $ hg locate b
265 293 b
266 294 $ hg summary
267 295 parent: 2:882396649954 tip
268 296 c
269 297 branch: default
270 298 commit: (clean)
271 299 update: (current)
272 300 phases: 3 draft
273 301
274 302 with --merge
275 303 $ hg backout --merge -d '3 0' 1 --tool=true
276 304 reverting a
277 305 created new head
278 306 changeset 3:3202beb76721 backs out changeset 1:22bca4c721e5
279 307 merging with changeset 3:3202beb76721
280 308 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
281 309 (branch merge, don't forget to commit)
282 310 $ hg locate b
283 311 b
284 312 $ hg update -C tip
285 313 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
286 314 $ hg locate b
287 315 [1]
288 316
289 317 $ cd ..
290 318 $ hg init m
291 319 $ cd m
292 320 $ echo a > a
293 321 $ hg commit -d '0 0' -A -m a
294 322 adding a
295 323 $ echo b > b
296 324 $ hg commit -d '1 0' -A -m b
297 325 adding b
298 326 $ echo c > c
299 327 $ hg commit -d '2 0' -A -m b
300 328 adding c
301 329 $ hg update 1
302 330 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
303 331 $ echo d > d
304 332 $ hg commit -d '3 0' -A -m c
305 333 adding d
306 334 created new head
307 335 $ hg merge 2
308 336 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
309 337 (branch merge, don't forget to commit)
310 338 $ hg commit -d '4 0' -A -m d
311 339 $ hg summary
312 340 parent: 4:b2f3bb92043e tip
313 341 d
314 342 branch: default
315 343 commit: (clean)
316 344 update: (current)
317 345 phases: 5 draft
318 346
319 347 backout of merge should fail
320 348
321 349 $ hg backout 4
322 350 abort: cannot backout a merge changeset
323 351 [255]
324 352
325 353 backout of merge with bad parent should fail
326 354
327 355 $ hg backout --parent 0 4
328 356 abort: cb9a9f314b8b is not a parent of b2f3bb92043e
329 357 [255]
330 358
331 359 backout of non-merge with parent should fail
332 360
333 361 $ hg backout --parent 0 3
334 362 abort: cannot use --parent on non-merge changeset
335 363 [255]
336 364
337 365 backout with valid parent should be ok
338 366
339 367 $ hg backout -d '5 0' --parent 2 4 --tool=true
340 368 removing d
341 369 changeset 5:10e5328c8435 backs out changeset 4:b2f3bb92043e
342 370 $ hg summary
343 371 parent: 5:10e5328c8435 tip
344 372 Backed out changeset b2f3bb92043e
345 373 branch: default
346 374 commit: (clean)
347 375 update: (current)
348 376 phases: 6 draft
349 377
350 378 $ hg rollback
351 379 repository tip rolled back to revision 4 (undo commit)
352 380 working directory now based on revision 4
353 381 $ hg update -C
354 382 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
355 383 $ hg summary
356 384 parent: 4:b2f3bb92043e tip
357 385 d
358 386 branch: default
359 387 commit: (clean)
360 388 update: (current)
361 389 phases: 5 draft
362 390
363 391 $ hg backout -d '6 0' --parent 3 4 --tool=true
364 392 removing c
365 393 changeset 5:033590168430 backs out changeset 4:b2f3bb92043e
366 394 $ hg summary
367 395 parent: 5:033590168430 tip
368 396 Backed out changeset b2f3bb92043e
369 397 branch: default
370 398 commit: (clean)
371 399 update: (current)
372 400 phases: 6 draft
373 401
374 402 $ cd ..
375 403
376 404 named branches
377 405
378 406 $ hg init named_branches
379 407 $ cd named_branches
380 408
381 409 $ echo default > default
382 410 $ hg ci -d '0 0' -Am default
383 411 adding default
384 412 $ hg branch branch1
385 413 marked working directory as branch branch1
386 414 (branches are permanent and global, did you want a bookmark?)
387 415 $ echo branch1 > file1
388 416 $ hg ci -d '1 0' -Am file1
389 417 adding file1
390 418 $ hg branch branch2
391 419 marked working directory as branch branch2
392 420 $ echo branch2 > file2
393 421 $ hg ci -d '2 0' -Am file2
394 422 adding file2
395 423
396 424 without --merge
397 425 $ hg backout -r 1 --tool=true
398 426 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
399 427 changeset bf1602f437f3 backed out, don't forget to commit.
400 428 $ hg branch
401 429 branch2
402 430 $ hg status -A
403 431 R file1
404 432 C default
405 433 C file2
406 434 $ hg summary
407 435 parent: 2:45bbcd363bf0 tip
408 436 file2
409 437 branch: branch2
410 438 commit: 1 removed
411 439 update: (current)
412 440 phases: 3 draft
413 441
414 442 with --merge
415 443 (this also tests that editor is invoked if '--edit' is specified
416 444 explicitly regardless of '--message')
417 445
418 446 $ hg update -qC
419 447 $ HGEDITOR=cat hg backout --merge -d '3 0' -r 1 -m 'backout on branch1' --tool=true --edit
420 448 removing file1
421 449 backout on branch1
422 450
423 451
424 452 HG: Enter commit message. Lines beginning with 'HG:' are removed.
425 453 HG: Leave message empty to abort commit.
426 454 HG: --
427 455 HG: user: test
428 456 HG: branch 'branch2'
429 457 HG: removed file1
430 458 created new head
431 459 changeset 3:d4e8f6db59fb backs out changeset 1:bf1602f437f3
432 460 merging with changeset 3:d4e8f6db59fb
433 461 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
434 462 (branch merge, don't forget to commit)
435 463 $ hg summary
436 464 parent: 2:45bbcd363bf0
437 465 file2
438 466 parent: 3:d4e8f6db59fb tip
439 467 backout on branch1
440 468 branch: branch2
441 469 commit: 1 removed (merge)
442 470 update: (current)
443 471 phases: 4 draft
444 472 $ hg update -q -C 2
445 473
446 474 on branch2 with branch1 not merged, so file1 should still exist:
447 475
448 476 $ hg id
449 477 45bbcd363bf0 (branch2)
450 478 $ hg st -A
451 479 C default
452 480 C file1
453 481 C file2
454 482 $ hg summary
455 483 parent: 2:45bbcd363bf0
456 484 file2
457 485 branch: branch2
458 486 commit: (clean)
459 487 update: 1 new changesets, 2 branch heads (merge)
460 488 phases: 4 draft
461 489
462 490 on branch2 with branch1 merged, so file1 should be gone:
463 491
464 492 $ hg merge
465 493 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
466 494 (branch merge, don't forget to commit)
467 495 $ hg ci -d '4 0' -m 'merge backout of branch1'
468 496 $ hg id
469 497 22149cdde76d (branch2) tip
470 498 $ hg st -A
471 499 C default
472 500 C file2
473 501 $ hg summary
474 502 parent: 4:22149cdde76d tip
475 503 merge backout of branch1
476 504 branch: branch2
477 505 commit: (clean)
478 506 update: (current)
479 507 phases: 5 draft
480 508
481 509 on branch1, so no file1 and file2:
482 510
483 511 $ hg co -C branch1
484 512 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
485 513 $ hg id
486 514 bf1602f437f3 (branch1)
487 515 $ hg st -A
488 516 C default
489 517 C file1
490 518 $ hg summary
491 519 parent: 1:bf1602f437f3
492 520 file1
493 521 branch: branch1
494 522 commit: (clean)
495 523 update: (current)
496 524 phases: 5 draft
497 525
498 526 $ cd ..
499 527
500 528 backout of empty changeset (issue4190)
501 529
502 530 $ hg init emptycommit
503 531 $ cd emptycommit
504 532
505 533 $ touch file1
506 534 $ hg ci -Aqm file1
507 535 $ hg branch -q branch1
508 536 $ hg ci -qm branch1
509 537 $ hg backout -v 1
510 538 resolving manifests
511 539 nothing changed
512 540 [1]
513 541
514 542 $ cd ..
515 543
516 544
517 545 Test usage of `hg resolve` in case of conflict
518 546 (issue4163)
519 547
520 548 $ hg init issue4163
521 549 $ cd issue4163
522 550 $ touch foo
523 551 $ hg add foo
524 552 $ cat > foo << EOF
525 553 > one
526 554 > two
527 555 > three
528 556 > four
529 557 > five
530 558 > six
531 559 > seven
532 560 > height
533 561 > nine
534 562 > ten
535 563 > EOF
536 564 $ hg ci -m 'initial'
537 565 $ cat > foo << EOF
538 566 > one
539 567 > two
540 568 > THREE
541 569 > four
542 570 > five
543 571 > six
544 572 > seven
545 573 > height
546 574 > nine
547 575 > ten
548 576 > EOF
549 577 $ hg ci -m 'capital three'
550 578 $ cat > foo << EOF
551 579 > one
552 580 > two
553 581 > THREE
554 582 > four
555 583 > five
556 584 > six
557 585 > seven
558 586 > height
559 587 > nine
560 588 > TEN
561 589 > EOF
562 590 $ hg ci -m 'capital ten'
563 591 $ hg backout -r 'desc("capital three")' --tool internal:fail
564 592 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
565 593 use 'hg resolve' to retry unresolved file merges
566 594 [1]
567 595 $ hg status
568 596 $ hg resolve -l # still unresolved
569 597 U foo
570 598 $ hg summary
571 599 parent: 2:b71750c4b0fd tip
572 600 capital ten
573 601 branch: default
574 602 commit: 1 unresolved (clean)
575 603 update: (current)
576 604 phases: 3 draft
577 605 $ hg resolve --all --debug
578 606 picked tool 'internal:merge' for foo (binary False symlink False)
579 607 merging foo
580 608 my foo@b71750c4b0fd+ other foo@a30dd8addae3 ancestor foo@913609522437
581 609 premerge successful
582 610 (no more unresolved files)
583 611 $ hg status
584 612 M foo
585 613 ? foo.orig
586 614 $ hg resolve -l
587 615 R foo
588 616 $ hg summary
589 617 parent: 2:b71750c4b0fd tip
590 618 capital ten
591 619 branch: default
592 620 commit: 1 modified, 1 unknown
593 621 update: (current)
594 622 phases: 3 draft
595 623 $ cat foo
596 624 one
597 625 two
598 626 three
599 627 four
600 628 five
601 629 six
602 630 seven
603 631 height
604 632 nine
605 633 TEN
606 634
607 635
General Comments 0
You need to be logged in to leave comments. Login now