##// END OF EJS Templates
transaction: register summary callbacks only at start of transaction (BC)...
Martin von Zweigbergk -
r35726:03e92194 default
parent child Browse files
Show More
@@ -1,2274 +1,2273
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepo,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67
68 68 release = lockmod.release
69 69 urlerr = util.urlerr
70 70 urlreq = util.urlreq
71 71
# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
# Populated by repofilecache/storecache constructors below.
_cachedfiles = set()
76 76
class _basefilecache(scmutil.filecache):
    """Filecache descriptor that always targets the unfiltered repo.

    All filecache usage on repo are done for logic that should be
    unfiltered, so every access is redirected to repo.unfiltered().
    """
    def __get__(self, repo, type=None):
        # Accessed on the class rather than an instance: return the
        # descriptor itself, as the descriptor protocol expects.
        if repo is None:
            return self
        unfi = repo.unfiltered()
        return super(_basefilecache, self).__get__(unfi, type)
    def __set__(self, repo, value):
        unfi = repo.unfiltered()
        return super(_basefilecache, self).__set__(unfi, value)
    def __delete__(self, repo):
        unfi = repo.unfiltered()
        return super(_basefilecache, self).__delete__(unfi)
88 88
class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        # Register every watched path as vfs-relative ('plain' location).
        _cachedfiles.update((p, 'plain') for p in paths)

    def join(self, obj, fname):
        # Resolve against the repo's .hg directory.
        return obj.vfs.join(fname)
98 98
class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        # Register every watched path as svfs-relative ('' location).
        _cachedfiles.update((p, '') for p in paths)

    def join(self, obj, fname):
        # Resolve against the repo's store directory.
        return obj.sjoin(fname)
108 108
def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    # Compare against None explicitly: the previous truthiness test would
    # misreport a present-but-falsy cache entry as "not cached".
    if cacheentry is None:
        return None, False
    return cacheentry.obj, True
118 118
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is not repo:
            # Filtered view: fetch the value cached on the unfiltered repo
            # instead of caching a copy on the view.
            return getattr(unfi, self.name)
        return super(unfilteredpropertycache, self).__get__(unfi)
127 127
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # Cache on the instance itself (which may be a filtered view),
        # unlike unfilteredpropertycache which redirects to the
        # unfiltered repo.
        object.__setattr__(obj, self.name, value)
133 133
134 134
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    # A cached property shows up directly in the instance dict.
    return name in vars(unfi)
138 138
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # Swap the (possibly filtered) repo for its unfiltered twin
        # before delegating.
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
144 144
# Wire-protocol capabilities advertised by modern local peers.
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
# Legacy peers additionally support the old changegroupsubset command.
legacycaps = moderncaps.union({'changegroupsubset'})
148 148
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        # Operate on the 'served' repo view, mirroring what a remote
        # peer would be allowed to see.
        self._repo = repo.filtered('served')
        self._ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    @util.propertycache
    def ui(self):
        return self._ui

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        # Produce the requested changegroup data and hand back an
        # unbundler over it (bundle2 or cg1 depending on the caps).
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        # Stream clones only make sense over a real wire protocol.
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            # Translate a push race into the error type wire callers expect.
            raise error.ResponseError(_('push failed:'), str(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def iterbatch(self):
        return peer.localiterbatcher(self)

    # End of peer interface.
273 273
class locallegacypeer(repository.legacypeer, localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, basenodes, source):
        # Legacy API: basenodes are the roots of the outgoing set.
        outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.
300 300
# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
304 304
305 305 class localrepository(object):
306 306
    # Requirements that affect how repository history is stored on disk.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
        REVLOGV2_REQUIREMENT,
    }
    # Full set of requirements this class can open.
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    # Requirements forwarded to the svfs opener as revlog options.
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
    }

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    # list of prefix for file which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assume
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't be dirstate covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }
352 352
    def __init__(self, baseui, path, create=False):
        """Open (or, when *create* is True, create) the repository at *path*.

        Raises error.RepoError when the repo is missing, already exists
        (with create=True), or uses an unsupported feature.
        """
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            # No per-repo hgrc (e.g. fresh repo) is fine.
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                # newreporequirements is defined elsewhere in this module.
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            # Shared repos point their store (and cache) at another repo.
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extensions to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}
512 512
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs

        Wraps the vfs audit callable so writes performed without the
        appropriate lock emit devel warnings (config 'check-locks').
        """
        # Weak ref so the ward doesn't keep the repo alive.
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            # Bail out during teardown / early setup when the repo or its
            # lock attributes are gone or not yet created.
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            # Reads never require a lock.
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs
547 547
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs

        Same idea as _getvfsward, but store writes are covered by the
        plain 'lock' rather than 'wlock'.
        """
        # Weak ref so the ward doesn't keep the repo alive.
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            # Reads never require a lock.
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs
566 566
    def close(self):
        # Flush in-memory caches to disk before the repo goes away.
        self._writecaches()
569 569
    def _loadextensions(self):
        # Load all extensions enabled for this repo's ui.
        extensions.loadall(self.ui)
572 572
573 573 def _writecaches(self):
574 574 if self._revbranchcache:
575 575 self._revbranchcache.write()
576 576
577 577 def _restrictcapabilities(self, caps):
578 578 if self.ui.configbool('experimental', 'bundle2-advertise'):
579 579 caps = set(caps)
580 580 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
581 581 caps.add('bundle2=' + urlreq.quote(capsblob))
582 582 return caps
583 583
    def _applyopenerreqs(self):
        """Translate repo requirements and config into svfs/revlog options."""
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        # Pick the compression engine requested via the requirements file.
        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True
627 627
    def _writerequirements(self):
        # Persist the current requirement set to .hg/requires.
        scmutil.writerequires(self.vfs, self.requirements)
630 630
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        *path* is an absolute filesystem path; returns True only when it
        falls inside a subrepo declared by the working copy.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            # Walk prefixes from longest to shortest looking for a subrepo.
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    # Deeper than a subrepo: delegate the check to it.
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
668 668
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle
671 671
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self
677 677
678 678 def filtered(self, name, visibilityexceptions=None):
679 679 """Return a filtered version of a repository"""
680 680 cls = repoview.newtype(self.unfiltered().__class__)
681 681 return cls(self, name, visibilityexceptions)
682 682
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        # Lazily-loaded bookmark store, invalidated when either file changes.
        return bookmarks.bmstore(self)
686 686
    @property
    def _activebookmark(self):
        # Name of the active bookmark, or None (delegated to the bmstore).
        return self._bookmarks.active
690 690
    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        # Lazily-built phase cache; _phasedefaults supplies fallback roots.
        return phases.phasecache(self, self._phasedefaults)
697 697
    @storecache('obsstore')
    def obsstore(self):
        # Obsolescence marker store, reloaded when 'obsstore' changes.
        return obsolete.makestore(self.ui, self)
701 701
    @storecache('00changelog.i')
    def changelog(self):
        # trypending lets readers see changesets from an in-progress
        # transaction when one may exist.
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))
706 706
    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)
712 712
    @storecache('00manifest.i')
    def manifestlog(self):
        # Manifest access object, reloaded when '00manifest.i' changes.
        return manifest.manifestlog(self.svfs, self)
716 716
717 717 @repofilecache('dirstate')
718 718 def dirstate(self):
719 719 sparsematchfn = lambda: sparse.matcher(self)
720 720
721 721 return dirstate.dirstate(self.vfs, self.ui, self.root,
722 722 self._dirstatevalidate, sparsematchfn)
723 723
724 724 def _dirstatevalidate(self, node):
725 725 try:
726 726 self.changelog.rev(node)
727 727 return node
728 728 except error.LookupError:
729 729 if not self._dirstatevalidatewarned:
730 730 self._dirstatevalidatewarned = True
731 731 self.ui.warn(_("warning: ignoring unknown"
732 732 " working parent %s!\n") % short(node))
733 733 return nullid
734 734
    def __getitem__(self, changeid):
        """Return the changectx for *changeid* (workingctx for None).

        A slice yields a list of changectx, skipping filtered revisions.
        """
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
747 747
748 748 def __contains__(self, changeid):
749 749 """True if the given changeid exists
750 750
751 751 error.LookupError is raised if an ambiguous node specified.
752 752 """
753 753 try:
754 754 self[changeid]
755 755 return True
756 756 except error.RepoLookupError:
757 757 return False
758 758
    def __nonzero__(self):
        # A repo object is always truthy, even when empty.
        return True

    __bool__ = __nonzero__
763 763
    def __len__(self):
        # Number of revisions in the (possibly filtered) changelog.
        return len(self.changelog)
766 766
    def __iter__(self):
        # Iterate revision numbers in changelog order.
        return iter(self.changelog)
769 769
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        # ui=None: no alias expansion (see docstring above).
        m = revset.match(None, expr)
        return m(self)
786 786
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
798 798
    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            # Passing the ui enables user-configured alias expansion.
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
813 813
814 814 def url(self):
815 815 return 'file:' + self.root
816 816
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
825 825
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # Derived structures, filled lazily by tagslist()/nodetags().
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
848 848
849 849 def tags(self):
850 850 '''return a mapping of tag to node'''
851 851 t = {}
852 852 if self.changelog.filteredrevs:
853 853 tags, tt = self._findtags()
854 854 else:
855 855 tags = self._tagscache.tags
856 856 for k, v in tags.iteritems():
857 857 try:
858 858 # ignore tags to unknown nodes
859 859 self.changelog.rev(v)
860 860 t[k] = v
861 861 except (error.LookupError, ValueError):
862 862 pass
863 863 return t
864 864
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        # Local tags override/extend the global ones in place.
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
899 899
900 900 def tagtype(self, tagname):
901 901 '''
902 902 return the type of the given tag. result can be:
903 903
904 904 'local' : a local tag
905 905 'global' : a global tag
906 906 None : tag does not exist
907 907 '''
908 908
909 909 return self._tagscache.tagtypes.get(tagname)
910 910
911 911 def tagslist(self):
912 912 '''return a list of tags ordered by revision'''
913 913 if not self._tagscache.tagslist:
914 914 l = []
915 915 for t, n in self.tags().iteritems():
916 916 l.append((self.changelog.rev(n), t, n))
917 917 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
918 918
919 919 return self._tagscache.tagslist
920 920
921 921 def nodetags(self, node):
922 922 '''return the tags associated with a node'''
923 923 if not self._tagscache.nodetagscache:
924 924 nodetagscache = {}
925 925 for t, n in self._tagscache.tags.iteritems():
926 926 nodetagscache.setdefault(n, []).append(t)
927 927 for tags in nodetagscache.itervalues():
928 928 tags.sort()
929 929 self._tagscache.nodetagscache = nodetagscache
930 930 return self._tagscache.nodetagscache.get(node, [])
931 931
932 932 def nodebookmarks(self, node):
933 933 """return the list of bookmarks pointing to the specified node"""
934 934 marks = []
935 935 for bookmark, n in self._bookmarks.iteritems():
936 936 if n == node:
937 937 marks.append(bookmark)
938 938 return sorted(marks)
939 939
940 940 def branchmap(self):
941 941 '''returns a dictionary {branch: [branchheads]} with branchheads
942 942 ordered by increasing revision number'''
943 943 branchmap.updatecache(self)
944 944 return self._branchcaches[self.filtername]
945 945
946 946 @unfilteredmethod
947 947 def revbranchcache(self):
948 948 if not self._revbranchcache:
949 949 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
950 950 return self._revbranchcache
951 951
952 952 def branchtip(self, branch, ignoremissing=False):
953 953 '''return the tip node for a given branch
954 954
955 955 If ignoremissing is True, then this method will not raise an error.
956 956 This is helpful for callers that only expect None for a missing branch
957 957 (e.g. namespace).
958 958
959 959 '''
960 960 try:
961 961 return self.branchmap().branchtip(branch)
962 962 except KeyError:
963 963 if not ignoremissing:
964 964 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
965 965 else:
966 966 pass
967 967
968 968 def lookup(self, key):
969 969 return self[key].node()
970 970
971 971 def lookupbranch(self, key, remote=None):
972 972 repo = remote or self
973 973 if key in repo.branchmap():
974 974 return key
975 975
976 976 repo = (remote and remote.local()) and remote or self
977 977 return repo[key].branch()
978 978
979 979 def known(self, nodes):
980 980 cl = self.changelog
981 981 nm = cl.nodemap
982 982 filtered = cl.filteredrevs
983 983 result = []
984 984 for n in nodes:
985 985 r = nm.get(n)
986 986 resp = not (r is None or r in filtered)
987 987 result.append(resp)
988 988 return result
989 989
    def local(self):
        """Return self, marking this repository as locally accessible.

        Callers test the truthiness of the result (see cancopy), so
        subclasses like statichttprepo override this to return a falsy
        value instead.
        """
        return self
992 992
    def publishing(self):
        """True if this repository publishes changesets (phases.publish)."""
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)
997 997
998 998 def cancopy(self):
999 999 # so statichttprepo's override of local() works
1000 1000 if not self.local():
1001 1001 return False
1002 1002 if not self.publishing():
1003 1003 return True
1004 1004 # if publishing we can't copy if there is filtered content
1005 1005 return not self.filtered('visible').changelog.filteredrevs
1006 1006
1007 1007 def shared(self):
1008 1008 '''the type of shared repository (None if not shared)'''
1009 1009 if self.sharedpath != self.path:
1010 1010 return 'store'
1011 1011 return None
1012 1012
    def wjoin(self, f, *insidef):
        """Join ``f`` (and further components) onto the working dir root."""
        return self.vfs.reljoin(self.root, f, *insidef)
1015 1015
    def file(self, f):
        """Return the filelog for tracked file ``f``.

        A single leading '/' is stripped (store paths are root-relative).
        """
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)
1020 1020
    def changectx(self, changeid):
        """Return the changectx for ``changeid`` (delegates to __getitem__)."""
        return self[changeid]
1023 1023
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents to the given binary nodes,
        fixing up dirstate copy records accordingly."""
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                # merge state is gone: drop copy records whose source and
                # destination are both absent from the (single) parent
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
1039 1039
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        # thin convenience wrapper; all resolution happens in context.filectx
        return context.filectx(self, path, changeid, fileid)
1044 1044
    def getcwd(self):
        """Return the current working directory as seen by the dirstate."""
        return self.dirstate.getcwd()
1047 1047
    def pathto(self, f, cwd=None):
        """Return repo-file ``f`` as a path relative to ``cwd``."""
        return self.dirstate.pathto(f, cwd)
1050 1050
    def _loadfilter(self, filter):
        """Load (and cache) the filter patterns for config section ``filter``
        ('encode' or 'decode').

        Returns a list of (matcher, filterfn, params) triples.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # a command starting with a registered data-filter name uses
                # that filter; the remainder of the command becomes its params
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # otherwise treat the command as an external shell filter
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    # note: oldfn must be captured per-iteration (it is, via
                    # the rebinding above) so each wrapper calls its own filter
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
1074 1074
1075 1075 def _filter(self, filterpats, filename, data):
1076 1076 for mf, fn, cmd in filterpats:
1077 1077 if mf(filename):
1078 1078 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1079 1079 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1080 1080 break
1081 1081
1082 1082 return data
1083 1083
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # lazily-built [encode] filter patterns (see _loadfilter)
        return self._loadfilter('encode')
1087 1087
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # lazily-built [decode] filter patterns (see _loadfilter)
        return self._loadfilter('decode')
1091 1091
    def adddatafilter(self, name, filter):
        """Register a named data filter; [encode]/[decode] commands starting
        with ``name`` will use it (see _loadfilter)."""
        self._datafilters[name] = filter
1094 1094
1095 1095 def wread(self, filename):
1096 1096 if self.wvfs.islink(filename):
1097 1097 data = self.wvfs.readlink(filename)
1098 1098 else:
1099 1099 data = self.wvfs.read(filename)
1100 1100 return self._filter(self._encodefilterpats, filename, data)
1101 1101
1102 1102 def wwrite(self, filename, data, flags, backgroundclose=False):
1103 1103 """write ``data`` into ``filename`` in the working directory
1104 1104
1105 1105 This returns length of written (maybe decoded) data.
1106 1106 """
1107 1107 data = self._filter(self._decodefilterpats, filename, data)
1108 1108 if 'l' in flags:
1109 1109 self.wvfs.symlink(data, filename)
1110 1110 else:
1111 1111 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1112 1112 if 'x' in flags:
1113 1113 self.wvfs.setflags(filename, False, True)
1114 1114 return len(data)
1115 1115
    def wwritedata(self, filename, data):
        """Run ``data`` through the decode filters for ``filename``."""
        return self._filter(self._decodefilterpats, filename, data)
1118 1118
1119 1119 def currenttransaction(self):
1120 1120 """return the current transaction or None if non exists"""
1121 1121 if self._transref:
1122 1122 tr = self._transref()
1123 1123 else:
1124 1124 tr = None
1125 1125
1126 1126 if tr and tr.running():
1127 1127 return tr
1128 1128 return None
1129 1129
1130 1130 def transaction(self, desc, report=None):
1131 1131 if (self.ui.configbool('devel', 'all-warnings')
1132 1132 or self.ui.configbool('devel', 'check-locks')):
1133 1133 if self._currentlock(self._lockref) is None:
1134 1134 raise error.ProgrammingError('transaction requires locking')
1135 1135 tr = self.currenttransaction()
1136 1136 if tr is not None:
1137 scmutil.registersummarycallback(self, tr, desc)
1138 1137 return tr.nest()
1139 1138
1140 1139 # abort here if the journal already exists
1141 1140 if self.svfs.exists("journal"):
1142 1141 raise error.RepoError(
1143 1142 _("abandoned transaction found"),
1144 1143 hint=_("run 'hg recover' to clean up transaction"))
1145 1144
1146 1145 idbase = "%.40f#%f" % (random.random(), time.time())
1147 1146 ha = hex(hashlib.sha1(idbase).digest())
1148 1147 txnid = 'TXN:' + ha
1149 1148 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1150 1149
1151 1150 self._writejournal(desc)
1152 1151 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1153 1152 if report:
1154 1153 rp = report
1155 1154 else:
1156 1155 rp = self.ui.warn
1157 1156 vfsmap = {'plain': self.vfs} # root of .hg/
1158 1157 # we must avoid cyclic reference between repo and transaction.
1159 1158 reporef = weakref.ref(self)
1160 1159 # Code to track tag movement
1161 1160 #
1162 1161 # Since tags are all handled as file content, it is actually quite hard
1163 1162 # to track these movement from a code perspective. So we fallback to a
1164 1163 # tracking at the repository level. One could envision to track changes
1165 1164 # to the '.hgtags' file through changegroup apply but that fails to
1166 1165 # cope with case where transaction expose new heads without changegroup
1167 1166 # being involved (eg: phase movement).
1168 1167 #
1169 1168 # For now, We gate the feature behind a flag since this likely comes
1170 1169 # with performance impacts. The current code run more often than needed
1171 1170 # and do not use caches as much as it could. The current focus is on
1172 1171 # the behavior of the feature so we disable it by default. The flag
1173 1172 # will be removed when we are happy with the performance impact.
1174 1173 #
1175 1174 # Once this feature is no longer experimental move the following
1176 1175 # documentation to the appropriate help section:
1177 1176 #
1178 1177 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1179 1178 # tags (new or changed or deleted tags). In addition the details of
1180 1179 # these changes are made available in a file at:
1181 1180 # ``REPOROOT/.hg/changes/tags.changes``.
1182 1181 # Make sure you check for HG_TAG_MOVED before reading that file as it
1183 1182 # might exist from a previous transaction even if no tag were touched
1184 1183 # in this one. Changes are recorded in a line base format::
1185 1184 #
1186 1185 # <action> <hex-node> <tag-name>\n
1187 1186 #
1188 1187 # Actions are defined as follow:
1189 1188 # "-R": tag is removed,
1190 1189 # "+A": tag is added,
1191 1190 # "-M": tag is moved (old value),
1192 1191 # "+M": tag is moved (new value),
1193 1192 tracktags = lambda x: None
1194 1193 # experimental config: experimental.hook-track-tags
1195 1194 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1196 1195 if desc != 'strip' and shouldtracktags:
1197 1196 oldheads = self.changelog.headrevs()
1198 1197 def tracktags(tr2):
1199 1198 repo = reporef()
1200 1199 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1201 1200 newheads = repo.changelog.headrevs()
1202 1201 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1203 1202 # notes: we compare lists here.
1204 1203 # As we do it only once buiding set would not be cheaper
1205 1204 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1206 1205 if changes:
1207 1206 tr2.hookargs['tag_moved'] = '1'
1208 1207 with repo.vfs('changes/tags.changes', 'w',
1209 1208 atomictemp=True) as changesfile:
1210 1209 # note: we do not register the file to the transaction
1211 1210 # because we needs it to still exist on the transaction
1212 1211 # is close (for txnclose hooks)
1213 1212 tagsmod.writediff(changesfile, changes)
1214 1213 def validate(tr2):
1215 1214 """will run pre-closing hooks"""
1216 1215 # XXX the transaction API is a bit lacking here so we take a hacky
1217 1216 # path for now
1218 1217 #
1219 1218 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1220 1219 # dict is copied before these run. In addition we needs the data
1221 1220 # available to in memory hooks too.
1222 1221 #
1223 1222 # Moreover, we also need to make sure this runs before txnclose
1224 1223 # hooks and there is no "pending" mechanism that would execute
1225 1224 # logic only if hooks are about to run.
1226 1225 #
1227 1226 # Fixing this limitation of the transaction is also needed to track
1228 1227 # other families of changes (bookmarks, phases, obsolescence).
1229 1228 #
1230 1229 # This will have to be fixed before we remove the experimental
1231 1230 # gating.
1232 1231 tracktags(tr2)
1233 1232 repo = reporef()
1234 1233 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1235 1234 scmutil.enforcesinglehead(repo, tr2, desc)
1236 1235 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1237 1236 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1238 1237 args = tr.hookargs.copy()
1239 1238 args.update(bookmarks.preparehookargs(name, old, new))
1240 1239 repo.hook('pretxnclose-bookmark', throw=True,
1241 1240 txnname=desc,
1242 1241 **pycompat.strkwargs(args))
1243 1242 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1244 1243 cl = repo.unfiltered().changelog
1245 1244 for rev, (old, new) in tr.changes['phases'].items():
1246 1245 args = tr.hookargs.copy()
1247 1246 node = hex(cl.node(rev))
1248 1247 args.update(phases.preparehookargs(node, old, new))
1249 1248 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1250 1249 **pycompat.strkwargs(args))
1251 1250
1252 1251 repo.hook('pretxnclose', throw=True,
1253 1252 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1254 1253 def releasefn(tr, success):
1255 1254 repo = reporef()
1256 1255 if success:
1257 1256 # this should be explicitly invoked here, because
1258 1257 # in-memory changes aren't written out at closing
1259 1258 # transaction, if tr.addfilegenerator (via
1260 1259 # dirstate.write or so) isn't invoked while
1261 1260 # transaction running
1262 1261 repo.dirstate.write(None)
1263 1262 else:
1264 1263 # discard all changes (including ones already written
1265 1264 # out) in this transaction
1266 1265 repo.dirstate.restorebackup(None, 'journal.dirstate')
1267 1266
1268 1267 repo.invalidate(clearfilecache=True)
1269 1268
1270 1269 tr = transaction.transaction(rp, self.svfs, vfsmap,
1271 1270 "journal",
1272 1271 "undo",
1273 1272 aftertrans(renames),
1274 1273 self.store.createmode,
1275 1274 validator=validate,
1276 1275 releasefn=releasefn,
1277 1276 checkambigfiles=_cachedfiles)
1278 1277 tr.changes['revs'] = xrange(0, 0)
1279 1278 tr.changes['obsmarkers'] = set()
1280 1279 tr.changes['phases'] = {}
1281 1280 tr.changes['bookmarks'] = {}
1282 1281
1283 1282 tr.hookargs['txnid'] = txnid
1284 1283 # note: writing the fncache only during finalize mean that the file is
1285 1284 # outdated when running hooks. As fncache is used for streaming clone,
1286 1285 # this is not expected to break anything that happen during the hooks.
1287 1286 tr.addfinalize('flush-fncache', self.store.write)
1288 1287 def txnclosehook(tr2):
1289 1288 """To be run if transaction is successful, will schedule a hook run
1290 1289 """
1291 1290 # Don't reference tr2 in hook() so we don't hold a reference.
1292 1291 # This reduces memory consumption when there are multiple
1293 1292 # transactions per lock. This can likely go away if issue5045
1294 1293 # fixes the function accumulation.
1295 1294 hookargs = tr2.hookargs
1296 1295
1297 1296 def hookfunc():
1298 1297 repo = reporef()
1299 1298 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1300 1299 bmchanges = sorted(tr.changes['bookmarks'].items())
1301 1300 for name, (old, new) in bmchanges:
1302 1301 args = tr.hookargs.copy()
1303 1302 args.update(bookmarks.preparehookargs(name, old, new))
1304 1303 repo.hook('txnclose-bookmark', throw=False,
1305 1304 txnname=desc, **pycompat.strkwargs(args))
1306 1305
1307 1306 if hook.hashook(repo.ui, 'txnclose-phase'):
1308 1307 cl = repo.unfiltered().changelog
1309 1308 phasemv = sorted(tr.changes['phases'].items())
1310 1309 for rev, (old, new) in phasemv:
1311 1310 args = tr.hookargs.copy()
1312 1311 node = hex(cl.node(rev))
1313 1312 args.update(phases.preparehookargs(node, old, new))
1314 1313 repo.hook('txnclose-phase', throw=False, txnname=desc,
1315 1314 **pycompat.strkwargs(args))
1316 1315
1317 1316 repo.hook('txnclose', throw=False, txnname=desc,
1318 1317 **pycompat.strkwargs(hookargs))
1319 1318 reporef()._afterlock(hookfunc)
1320 1319 tr.addfinalize('txnclose-hook', txnclosehook)
1321 1320 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1322 1321 def txnaborthook(tr2):
1323 1322 """To be run if transaction is aborted
1324 1323 """
1325 1324 reporef().hook('txnabort', throw=False, txnname=desc,
1326 1325 **tr2.hookargs)
1327 1326 tr.addabort('txnabort-hook', txnaborthook)
1328 1327 # avoid eager cache invalidation. in-memory data should be identical
1329 1328 # to stored data if transaction has no error.
1330 1329 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1331 1330 self._transref = weakref.ref(tr)
1332 1331 scmutil.registersummarycallback(self, tr, desc)
1333 1332 return tr
1334 1333
    def _journalfiles(self):
        """Return the (vfs, filename) pairs making up a transaction journal.

        Store-level files live in svfs; working-copy metadata in vfs.
        """
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))
1342 1341
    def undofiles(self):
        """Return (vfs, filename) pairs for the 'undo.*' copies of the
        journal files (see _journalfiles)."""
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1345 1344
    @unfilteredmethod
    def _writejournal(self, desc):
        """Snapshot pre-transaction state into the journal.* files so an
        aborted transaction can be rolled back."""
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        # first line: changelog length before the transaction; second: desc
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
1357 1356
1358 1357 def recover(self):
1359 1358 with self.lock():
1360 1359 if self.svfs.exists("journal"):
1361 1360 self.ui.status(_("rolling back interrupted transaction\n"))
1362 1361 vfsmap = {'': self.svfs,
1363 1362 'plain': self.vfs,}
1364 1363 transaction.rollback(self.svfs, vfsmap, "journal",
1365 1364 self.ui.warn,
1366 1365 checkambigfiles=_cachedfiles)
1367 1366 self.invalidate()
1368 1367 return True
1369 1368 else:
1370 1369 self.ui.warn(_("no interrupted transaction available\n"))
1371 1370 return False
1372 1371
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction using the 'undo' files.

        Returns 0 on success (via _rollback) and 1 when there is nothing
        to roll back.
        """
        wlock = lock = dsguard = None
        try:
            # wlock before lock to respect the global lock ordering
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                # guard the dirstate while _rollback rewrites it
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
1387 1386
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Implementation of rollback(): restore store and working-copy
        metadata from the undo.* files. Returns 0."""
        ui = self.ui
        try:
            # undo.desc holds "<old changelog length>\n<desc>[\n<detail>]"
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # undo.desc missing/unreadable; proceed without details
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        # if a dirstate parent was rolled away, the working copy must be
        # moved back too
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            # any in-progress merge state is now meaningless
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1459 1458
1460 1459 def _buildcacheupdater(self, newtransaction):
1461 1460 """called during transaction to build the callback updating cache
1462 1461
1463 1462 Lives on the repository to help extension who might want to augment
1464 1463 this logic. For this purpose, the created transaction is passed to the
1465 1464 method.
1466 1465 """
1467 1466 # we must avoid cyclic reference between repo and transaction.
1468 1467 reporef = weakref.ref(self)
1469 1468 def updater(tr):
1470 1469 repo = reporef()
1471 1470 repo.updatecaches(tr)
1472 1471 return updater
1473 1472
    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction closed. The transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        # no transaction info, or new revisions were added: refresh
        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))
1491 1490
1492 1491 def invalidatecaches(self):
1493 1492
1494 1493 if '_tagscache' in vars(self):
1495 1494 # can't use delattr on proxy
1496 1495 del self.__dict__['_tagscache']
1497 1496
1498 1497 self.unfiltered()._branchcaches.clear()
1499 1498 self.invalidatevolatilesets()
1500 1499 self._sparsesignaturecache.clear()
1501 1500
    def invalidatevolatilesets(self):
        """Drop caches depending on filtering and obsolescence markers."""
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1505 1504
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # drop the per-file cached attributes of the dirstate first,
            # then the dirstate filecache entry itself
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')
1522 1521
    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                # attribute was never instantiated; nothing to drop
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()
1556 1555
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()
1563 1562
1564 1563 @unfilteredmethod
1565 1564 def _refreshfilecachestats(self, tr):
1566 1565 """Reload stats of cached files so that they are flagged as valid"""
1567 1566 for k, ce in self._filecache.items():
1568 1567 if k == 'dirstate' or k not in self.__dict__:
1569 1568 continue
1570 1569 ce.refresh()
1571 1570
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        """Acquire and return the lock file ``lockname`` on ``vfs``.

        ``wait`` enables blocking (with ui.timeout / ui.timeout.warn);
        ``releasefn``/``acquirefn`` are passed through to the lock;
        ``parentenvvar`` names an environment variable that may carry a
        lock inherited from a parent process.
        """
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock)
        return l
1592 1591
1593 1592 def _afterlock(self, callback):
1594 1593 """add a callback to be run when the repository is fully unlocked
1595 1594
1596 1595 The callback will be executed when the outermost lock is released
1597 1596 (with wlock being higher level than 'lock')."""
1598 1597 for ref in (self._wlockref, self._lockref):
1599 1598 l = ref and ref()
1600 1599 if l and l.held:
1601 1600 l.postrelease.append(callback)
1602 1601 break
1603 1602 else: # no lock have been found.
1604 1603 callback()
1605 1604
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            # reentrant: bump the hold count on the existing lock
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1622 1621
    def _wlockchecktransaction(self):
        # inheritchecker for wlock: handing the lock to a child process
        # while a transaction is open is forbidden
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')
1627 1626
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # reentrant: bump the hold count on the existing lock
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # flush (or discard) pending dirstate changes on release
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
1663 1662
1664 1663 def _currentlock(self, lockref):
1665 1664 """Returns the lock if it's held, or None if it's not."""
1666 1665 if lockref is None:
1667 1666 return None
1668 1667 l = lockref()
1669 1668 if l is None or not l.held:
1670 1669 return None
1671 1670 return l
1672 1671
    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)
1676 1675
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the filelog node to record in the manifest for this file
        (either a reused parent node or a freshly added revision), and
        appends the file name to *changelist* when a change is recorded.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # fctx comes from an existing revision: reuse its filelog node
            # when it already matches one of the parents.
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1764 1763
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable

        Calls fail(filename, message) for each explicitly named file that
        is deleted, untracked, or a directory with no matched entries.
        Only exact/prefix matchers are checked; pattern matchers pass.
        """
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))
1785 1784
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing to
        commit and empty commits are not allowed.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # collect visited directories so explicit patterns can be
            # validated against them in checkcommitpatterns()
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepo.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        # run the 'commit' hook only once all locks are released
        self._afterlock(commithook)
        return ret
1897 1896
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When *error* is true, IOErrors raised while committing individual
        files are re-raised even when the errno is ENOENT.  Returns the
        new changelog node.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                # only report files removed that actually existed in a parent
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                # no file changes at all: reuse p1's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1990 1989
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            # vars() check: only flush if the cache was actually loaded
            self._phasecache.write()
2008 2007
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
2040 2039
2041 2040 def walk(self, match, node=None):
2042 2041 '''
2043 2042 walk recursively through the directory tree or a given
2044 2043 changeset, finding all files matched by the match
2045 2044 function
2046 2045 '''
2047 2046 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
2048 2047 return self[node].walk(match)
2049 2048
2050 2049 def status(self, node1='.', node2=None, match=None,
2051 2050 ignored=False, clean=False, unknown=False,
2052 2051 listsubrepos=False):
2053 2052 '''a convenience method that calls node1.status(node2)'''
2054 2053 return self[node1].status(node2, match, ignored, clean, unknown,
2055 2054 listsubrepos)
2056 2055
2057 2056 def addpostdsstatus(self, ps):
2058 2057 """Add a callback to run within the wlock, at the point at which status
2059 2058 fixups happen.
2060 2059
2061 2060 On status completion, callback(wctx, status) will be called with the
2062 2061 wlock held, unless the dirstate has changed from underneath or the wlock
2063 2062 couldn't be grabbed.
2064 2063
2065 2064 Callbacks should not capture and use a cached copy of the dirstate --
2066 2065 it might change in the meanwhile. Instead, they should access the
2067 2066 dirstate via wctx.repo().dirstate.
2068 2067
2069 2068 This list is emptied out after each status run -- extensions should
2070 2069 make sure it adds to this list each time dirstate.status is called.
2071 2070 Extensions should also make sure they don't call this for statuses
2072 2071 that don't involve the dirstate.
2073 2072 """
2074 2073
2075 2074 # The list is located here for uniqueness reasons -- it is actually
2076 2075 # managed by the workingctx, but that isn't unique per-repo.
2077 2076 self._postdsstatus.append(ps)
2078 2077
2079 2078 def postdsstatus(self):
2080 2079 """Used by workingctx to get the list of post-dirstate-status hooks."""
2081 2080 return self._postdsstatus
2082 2081
2083 2082 def clearpostdsstatus(self):
2084 2083 """Used by workingctx to clear post-dirstate-status hooks."""
2085 2084 del self._postdsstatus[:]
2086 2085
2087 2086 def heads(self, start=None):
2088 2087 if start is None:
2089 2088 cl = self.changelog
2090 2089 headrevs = reversed(cl.headrevs())
2091 2090 return [cl.node(rev) for rev in headrevs]
2092 2091
2093 2092 heads = self.changelog.heads(start)
2094 2093 # sort the output in rev descending order
2095 2094 return sorted(heads, key=self.changelog.rev, reverse=True)
2096 2095
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
2117 2116
    def branches(self, nodes):
        """For each node, walk first parents back to a merge or root and
        return (tip-of-branch, branch-base, p1, p2) tuples.

        Defaults to the changelog tip when *nodes* is empty.  Used by the
        legacy wire protocol's 'branches' command.
        """
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                # stop at a merge (two real parents) or at a root
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
2131 2130
2132 2131 def between(self, pairs):
2133 2132 r = []
2134 2133
2135 2134 for top, bottom in pairs:
2136 2135 n, l, i = top, [], 0
2137 2136 f = 1
2138 2137
2139 2138 while n != bottom and n != nullid:
2140 2139 p = self.changelog.parents(n)[0]
2141 2140 if i == f:
2142 2141 l.append(n)
2143 2142 f = f * 2
2144 2143 n = p
2145 2144 i += 1
2146 2145
2147 2146 r.append(l)
2148 2147
2149 2148 return r
2150 2149
2151 2150 def checkpush(self, pushop):
2152 2151 """Extensions can override this function if additional checks have
2153 2152 to be performed before pushing, or call it if they override push
2154 2153 command.
2155 2154 """
2156 2155
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        # property-cached: one hooks container per (unfiltered) repo
        return util.hooks()
2163 2162
    def pushkey(self, namespace, key, old, new):
        """Update a pushkey *key* in *namespace* from *old* to *new*.

        Fires the 'prepushkey' hook first (aborting returns False), then
        performs the push and schedules the 'pushkey' hook to run once the
        locks are released.  Returns the pushkey.push result.
        """
        try:
            # inherit hook arguments from an in-flight transaction, if any
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        # run the 'pushkey' hook only after all locks are released
        self._afterlock(runhook)
        return ret
2187 2186
2188 2187 def listkeys(self, namespace):
2189 2188 self.hook('prelistkeys', throw=True, namespace=namespace)
2190 2189 self.ui.debug('listing keys for "%s"\n' % namespace)
2191 2190 values = pushkey.list(self, namespace)
2192 2191 self.hook('listkeys', namespace=namespace, values=values)
2193 2192 return values
2194 2193
2195 2194 def debugwireargs(self, one, two, three=None, four=None, five=None):
2196 2195 '''used to test argument passing over the wire'''
2197 2196 return "%s %s %s %s %s" % (one, two, three, four, five)
2198 2197
2199 2198 def savecommitmessage(self, text):
2200 2199 fp = self.vfs('last-message.txt', 'wb')
2201 2200 try:
2202 2201 fp.write(text)
2203 2202 finally:
2204 2203 fp.close()
2205 2204 return self.pathto(fp.name[len(self.root) + 1:])
2206 2205
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames every (vfs, src, dest) triple in
    *files*, for use as a post-transaction action."""
    pending = [tuple(t) for t in files]
    def renameall():
        for vfs, src, dest in pending:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renameall
2221 2220
def undoname(fn):
    """Map a journal file path onto the corresponding undo file path."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    # only the leading 'journal' is rewritten
    return os.path.join(directory, name.replace('journal', 'undo', 1))
2226 2225
def instance(ui, path, create):
    """Open (or create) the local repository at *path*."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
2229 2228
def islocal(path):
    """A localrepository path is always local, whatever *path* contains."""
    return True
2232 2231
def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    # fncache requires store; dotencode requires fncache (hence the nesting)
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2'):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
@@ -1,1010 +1,1008
1 1 #require killdaemons
2 2
3 3 $ cat <<EOF >> $HGRCPATH
4 4 > [extensions]
5 5 > transplant=
6 6 > EOF
7 7
8 8 $ hg init t
9 9 $ cd t
10 10 $ hg transplant
11 11 abort: no source URL, branch revision, or revision list provided
12 12 [255]
13 13 $ hg transplant --continue --all
14 14 abort: --continue is incompatible with --branch, --all and --merge
15 15 [255]
16 16 $ hg transplant --all tip
17 17 abort: --all requires a branch revision
18 18 [255]
19 19 $ hg transplant --all --branch default tip
20 20 abort: --all is incompatible with a revision list
21 21 [255]
22 22 $ echo r1 > r1
23 23 $ hg ci -Amr1 -d'0 0'
24 24 adding r1
25 25 $ hg co -q null
26 26 $ hg transplant tip
27 27 abort: no revision checked out
28 28 [255]
29 29 $ hg up -q
30 30 $ echo r2 > r2
31 31 $ hg ci -Amr2 -d'1 0'
32 32 adding r2
33 33 $ hg up 0
34 34 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
35 35
36 36 $ echo b1 > b1
37 37 $ hg ci -Amb1 -d '0 0'
38 38 adding b1
39 39 created new head
40 40 $ hg merge 1
41 41 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 42 (branch merge, don't forget to commit)
43 43 $ hg transplant 1
44 44 abort: outstanding uncommitted merges
45 45 [255]
46 46 $ hg up -qC tip
47 47 $ echo b0 > b1
48 48 $ hg transplant 1
49 49 abort: outstanding local changes
50 50 [255]
51 51 $ hg up -qC tip
52 52 $ echo b2 > b2
53 53 $ hg ci -Amb2 -d '1 0'
54 54 adding b2
55 55 $ echo b3 > b3
56 56 $ hg ci -Amb3 -d '2 0'
57 57 adding b3
58 58
59 59 $ hg log --template '{rev} {parents} {desc}\n'
60 60 4 b3
61 61 3 b2
62 62 2 0:17ab29e464c6 b1
63 63 1 r2
64 64 0 r1
65 65
66 66 $ hg clone . ../rebase
67 67 updating to branch default
68 68 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
69 69 $ hg init ../emptydest
70 70 $ cd ../emptydest
71 71 $ hg transplant --source=../t > /dev/null
72 72 $ cd ../rebase
73 73
74 74 $ hg up -C 1
75 75 1 files updated, 0 files merged, 3 files removed, 0 files unresolved
76 76
77 77 rebase b onto r1
78 78 (this also tests that editor is not invoked if '--edit' is not specified)
79 79
80 80 $ HGEDITOR=cat hg transplant -a -b tip
81 81 applying 37a1297eb21b
82 82 37a1297eb21b transplanted to e234d668f844
83 83 applying 722f4667af76
84 84 722f4667af76 transplanted to 539f377d78df
85 85 applying a53251cdf717
86 86 a53251cdf717 transplanted to ffd6818a3975
87 87 $ hg log --template '{rev} {parents} {desc}\n'
88 88 7 b3
89 89 6 b2
90 90 5 1:d11e3596cc1a b1
91 91 4 b3
92 92 3 b2
93 93 2 0:17ab29e464c6 b1
94 94 1 r2
95 95 0 r1
96 96
97 97 test format of transplant_source
98 98
99 99 $ hg log -r7 --debug | grep transplant_source
100 100 extra: transplant_source=\xa52Q\xcd\xf7\x17g\x9d\x19\x07\xb2\x89\xf9\x91SK\xe0\\\x99z
101 101 $ hg log -r7 -T '{extras}\n'
102 102 branch=defaulttransplant_source=\xa52Q\xcd\xf7\x17g\x9d\x19\x07\xb2\x89\xf9\x91SK\xe0\\\x99z
103 103 $ hg log -r7 -T '{join(extras, " ")}\n'
104 104 branch=default transplant_source=\xa52Q\xcd\xf7\x17g\x9d\x19\x07\xb2\x89\xf9\x91SK\xe0\\\x99z
105 105
106 106 test transplanted revset
107 107
108 108 $ hg log -r 'transplanted()' --template '{rev} {parents} {desc}\n'
109 109 5 1:d11e3596cc1a b1
110 110 6 b2
111 111 7 b3
112 112 $ hg log -r 'transplanted(head())' --template '{rev} {parents} {desc}\n'
113 113 7 b3
114 114 $ hg help revisions.transplanted
115 115 "transplanted([set])"
116 116 Transplanted changesets in set, or all transplanted changesets.
117 117
118 118
119 119 test transplanted keyword
120 120
121 121 $ hg log --template '{rev} {transplanted}\n'
122 122 7 a53251cdf717679d1907b289f991534be05c997a
123 123 6 722f4667af767100cb15b6a79324bf8abbfe1ef4
124 124 5 37a1297eb21b3ef5c5d2ffac22121a0988ed9f21
125 125 4
126 126 3
127 127 2
128 128 1
129 129 0
130 130
131 131 test destination() revset predicate with a transplant of a transplant; new
132 132 clone so subsequent rollback isn't affected
133 133 (this also tests that editor is invoked if '--edit' is specified)
134 134
135 135 $ hg clone -q . ../destination
136 136 $ cd ../destination
137 137 $ hg up -Cq 0
138 138 $ hg branch -q b4
139 139 $ hg ci -qm "b4"
140 140 $ hg status --rev "7^1" --rev 7
141 141 A b3
142 142 $ cat > $TESTTMP/checkeditform.sh <<EOF
143 143 > env | grep HGEDITFORM
144 144 > true
145 145 > EOF
146 146 $ cat > $TESTTMP/checkeditform-n-cat.sh <<EOF
147 147 > env | grep HGEDITFORM
148 148 > cat \$*
149 149 > EOF
150 150 $ HGEDITOR="sh $TESTTMP/checkeditform-n-cat.sh" hg transplant --edit 7
151 151 applying ffd6818a3975
152 152 HGEDITFORM=transplant.normal
153 153 b3
154 154
155 155
156 156 HG: Enter commit message. Lines beginning with 'HG:' are removed.
157 157 HG: Leave message empty to abort commit.
158 158 HG: --
159 159 HG: user: test
160 160 HG: branch 'b4'
161 161 HG: added b3
162 162 ffd6818a3975 transplanted to 502236fa76bb
163 163
164 164
165 165 $ hg log -r 'destination()'
166 166 changeset: 5:e234d668f844
167 167 parent: 1:d11e3596cc1a
168 168 user: test
169 169 date: Thu Jan 01 00:00:00 1970 +0000
170 170 summary: b1
171 171
172 172 changeset: 6:539f377d78df
173 173 user: test
174 174 date: Thu Jan 01 00:00:01 1970 +0000
175 175 summary: b2
176 176
177 177 changeset: 7:ffd6818a3975
178 178 user: test
179 179 date: Thu Jan 01 00:00:02 1970 +0000
180 180 summary: b3
181 181
182 182 changeset: 9:502236fa76bb
183 183 branch: b4
184 184 tag: tip
185 185 user: test
186 186 date: Thu Jan 01 00:00:02 1970 +0000
187 187 summary: b3
188 188
189 189 $ hg log -r 'destination(a53251cdf717)'
190 190 changeset: 7:ffd6818a3975
191 191 user: test
192 192 date: Thu Jan 01 00:00:02 1970 +0000
193 193 summary: b3
194 194
195 195 changeset: 9:502236fa76bb
196 196 branch: b4
197 197 tag: tip
198 198 user: test
199 199 date: Thu Jan 01 00:00:02 1970 +0000
200 200 summary: b3
201 201
202 202
203 203 test subset parameter in reverse order
204 204 $ hg log -r 'reverse(all()) and destination(a53251cdf717)'
205 205 changeset: 9:502236fa76bb
206 206 branch: b4
207 207 tag: tip
208 208 user: test
209 209 date: Thu Jan 01 00:00:02 1970 +0000
210 210 summary: b3
211 211
212 212 changeset: 7:ffd6818a3975
213 213 user: test
214 214 date: Thu Jan 01 00:00:02 1970 +0000
215 215 summary: b3
216 216
217 217
218 218 back to the original dir
219 219 $ cd ../rebase
220 220
221 221 rollback the transplant
222 222 $ hg rollback
223 223 repository tip rolled back to revision 4 (undo transplant)
224 224 working directory now based on revision 1
225 225 $ hg tip -q
226 226 4:a53251cdf717
227 227 $ hg parents -q
228 228 1:d11e3596cc1a
229 229 $ hg status
230 230 ? b1
231 231 ? b2
232 232 ? b3
233 233
234 234 $ hg clone ../t ../prune
235 235 updating to branch default
236 236 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
237 237 $ cd ../prune
238 238
239 239 $ hg up -C 1
240 240 1 files updated, 0 files merged, 3 files removed, 0 files unresolved
241 241
242 242 rebase b onto r1, skipping b2
243 243
244 244 $ hg transplant -a -b tip -p 3
245 245 applying 37a1297eb21b
246 246 37a1297eb21b transplanted to e234d668f844
247 247 applying a53251cdf717
248 248 a53251cdf717 transplanted to 7275fda4d04f
249 249 $ hg log --template '{rev} {parents} {desc}\n'
250 250 6 b3
251 251 5 1:d11e3596cc1a b1
252 252 4 b3
253 253 3 b2
254 254 2 0:17ab29e464c6 b1
255 255 1 r2
256 256 0 r1
257 257
258 258 test same-parent transplant with --log
259 259
260 260 $ hg clone -r 1 ../t ../sameparent
261 261 adding changesets
262 262 adding manifests
263 263 adding file changes
264 264 added 2 changesets with 2 changes to 2 files
265 265 new changesets 17ab29e464c6:d11e3596cc1a
266 266 updating to branch default
267 267 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
268 268 $ cd ../sameparent
269 269 $ hg transplant --log -s ../prune 5
270 270 searching for changes
271 271 applying e234d668f844
272 272 e234d668f844 transplanted to e07aea8ecf9c
273 273 $ hg log --template '{rev} {parents} {desc}\n'
274 274 2 b1
275 275 (transplanted from e234d668f844e1b1a765f01db83a32c0c7bfa170)
276 276 1 r2
277 277 0 r1
278 278 remote transplant, and also test that transplant doesn't break with
279 279 format-breaking diffopts
280 280
281 281 $ hg clone -r 1 ../t ../remote
282 282 adding changesets
283 283 adding manifests
284 284 adding file changes
285 285 added 2 changesets with 2 changes to 2 files
286 286 new changesets 17ab29e464c6:d11e3596cc1a
287 287 updating to branch default
288 288 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
289 289 $ cd ../remote
290 290 $ hg --config diff.noprefix=True transplant --log -s ../t 2 4
291 291 searching for changes
292 292 applying 37a1297eb21b
293 293 37a1297eb21b transplanted to c19cf0ccb069
294 294 applying a53251cdf717
295 295 a53251cdf717 transplanted to f7fe5bf98525
296 296 $ hg log --template '{rev} {parents} {desc}\n'
297 297 3 b3
298 298 (transplanted from a53251cdf717679d1907b289f991534be05c997a)
299 299 2 b1
300 300 (transplanted from 37a1297eb21b3ef5c5d2ffac22121a0988ed9f21)
301 301 1 r2
302 302 0 r1
303 303
304 304 skip previous transplants
305 305
306 306 $ hg transplant -s ../t -a -b 4
307 307 searching for changes
308 308 applying 722f4667af76
309 309 722f4667af76 transplanted to 47156cd86c0b
310 310 $ hg log --template '{rev} {parents} {desc}\n'
311 311 4 b2
312 312 3 b3
313 313 (transplanted from a53251cdf717679d1907b289f991534be05c997a)
314 314 2 b1
315 315 (transplanted from 37a1297eb21b3ef5c5d2ffac22121a0988ed9f21)
316 316 1 r2
317 317 0 r1
318 318
319 319 skip local changes transplanted to the source
320 320
321 321 $ echo b4 > b4
322 322 $ hg ci -Amb4 -d '3 0'
323 323 adding b4
324 324 $ hg clone ../t ../pullback
325 325 updating to branch default
326 326 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
327 327 $ cd ../pullback
328 328 $ hg transplant -s ../remote -a -b tip
329 329 searching for changes
330 330 applying 4333daefcb15
331 331 4333daefcb15 transplanted to 5f42c04e07cc
332 332
333 333
334 334 remote transplant with pull
335 335
336 336 $ hg serve -R ../t -p $HGPORT -d --pid-file=../t.pid
337 337 $ cat ../t.pid >> $DAEMON_PIDS
338 338
339 339 $ hg clone -r 0 ../t ../rp
340 340 adding changesets
341 341 adding manifests
342 342 adding file changes
343 343 added 1 changesets with 1 changes to 1 files
344 344 new changesets 17ab29e464c6
345 345 updating to branch default
346 346 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
347 347 $ cd ../rp
348 348 $ hg transplant -s http://localhost:$HGPORT/ 37a1297eb21b a53251cdf717
349 349 searching for changes
350 350 searching for changes
351 351 adding changesets
352 352 adding manifests
353 353 adding file changes
354 354 added 1 changesets with 1 changes to 1 files
355 355 applying a53251cdf717
356 356 a53251cdf717 transplanted to 8d9279348abb
357 new changesets 37a1297eb21b:8d9279348abb
358 357 $ hg log --template '{rev} {parents} {desc}\n'
359 358 2 b3
360 359 1 b1
361 360 0 r1
362 361
363 362 remote transplant without pull
364 363 (It was using "2" and "4" (as the previous transplant used to) which referenced
365 364 revisions that differed from one run to another)
366 365
367 366 $ hg pull -q http://localhost:$HGPORT/
368 367 $ hg transplant -s http://localhost:$HGPORT/ 8d9279348abb 722f4667af76
369 368 skipping already applied revision 2:8d9279348abb
370 369 applying 722f4667af76
371 370 722f4667af76 transplanted to 76e321915884
372 371
373 372 transplant --continue
374 373
375 374 $ hg init ../tc
376 375 $ cd ../tc
377 376 $ cat <<EOF > foo
378 377 > foo
379 378 > bar
380 379 > baz
381 380 > EOF
382 381 $ echo toremove > toremove
383 382 $ echo baz > baz
384 383 $ hg ci -Amfoo
385 384 adding baz
386 385 adding foo
387 386 adding toremove
388 387 $ cat <<EOF > foo
389 388 > foo2
390 389 > bar2
391 390 > baz2
392 391 > EOF
393 392 $ rm toremove
394 393 $ echo added > added
395 394 $ hg ci -Amfoo2
396 395 adding added
397 396 removing toremove
398 397 $ echo bar > bar
399 398 $ cat > baz <<EOF
400 399 > before baz
401 400 > baz
402 401 > after baz
403 402 > EOF
404 403 $ hg ci -Ambar
405 404 adding bar
406 405 $ echo bar2 >> bar
407 406 $ hg ci -mbar2
408 407 $ hg up 0
409 408 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
410 409 $ echo foobar > foo
411 410 $ hg ci -mfoobar
412 411 created new head
413 412 $ hg transplant 1:3
414 413 applying 46ae92138f3c
415 414 patching file foo
416 415 Hunk #1 FAILED at 0
417 416 1 out of 1 hunks FAILED -- saving rejects to file foo.rej
418 417 patch failed to apply
419 418 abort: fix up the working directory and run hg transplant --continue
420 419 [255]
421 420
422 421 transplant -c shouldn't use an old changeset
423 422
424 423 $ hg up -C
425 424 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
426 425 updated to "e8643552fde5: foobar"
427 426 1 other heads for branch "default"
428 427 $ rm added
429 428 $ hg transplant --continue
430 429 abort: no transplant to continue
431 430 [255]
432 431 $ hg transplant 1
433 432 applying 46ae92138f3c
434 433 patching file foo
435 434 Hunk #1 FAILED at 0
436 435 1 out of 1 hunks FAILED -- saving rejects to file foo.rej
437 436 patch failed to apply
438 437 abort: fix up the working directory and run hg transplant --continue
439 438 [255]
440 439 $ cp .hg/transplant/journal .hg/transplant/journal.orig
441 440 $ cat .hg/transplant/journal
442 441 # User test
443 442 # Date 0 0
444 443 # Node ID 46ae92138f3ce0249f6789650403286ead052b6d
445 444 # Parent e8643552fde58f57515e19c4b373a57c96e62af3
446 445 foo2
447 446 $ grep -v 'Date' .hg/transplant/journal.orig > .hg/transplant/journal
448 447 $ HGEDITOR="sh $TESTTMP/checkeditform.sh" hg transplant --continue -e
449 448 abort: filter corrupted changeset (no user or date)
450 449 [255]
451 450 $ cp .hg/transplant/journal.orig .hg/transplant/journal
452 451 $ HGEDITOR="sh $TESTTMP/checkeditform.sh" hg transplant --continue -e
453 452 HGEDITFORM=transplant.normal
454 453 46ae92138f3c transplanted as 9159dada197d
455 454 $ hg transplant 1:3
456 455 skipping already applied revision 1:46ae92138f3c
457 456 applying 9d6d6b5a8275
458 457 9d6d6b5a8275 transplanted to 2d17a10c922f
459 458 applying 1dab759070cf
460 459 1dab759070cf transplanted to e06a69927eb0
461 460 $ hg locate
462 461 added
463 462 bar
464 463 baz
465 464 foo
466 465
467 466 test multiple revisions and --continue
468 467
469 468 $ hg up -qC 0
470 469 $ echo bazbaz > baz
471 470 $ hg ci -Am anotherbaz baz
472 471 created new head
473 472 $ hg transplant 1:3
474 473 applying 46ae92138f3c
475 474 46ae92138f3c transplanted to 1024233ea0ba
476 475 applying 9d6d6b5a8275
477 476 patching file baz
478 477 Hunk #1 FAILED at 0
479 478 1 out of 1 hunks FAILED -- saving rejects to file baz.rej
480 479 patch failed to apply
481 480 abort: fix up the working directory and run hg transplant --continue
482 481 [255]
483 482 $ hg transplant 1:3
484 483 abort: transplant in progress
485 484 (use 'hg transplant --continue' or 'hg update' to abort)
486 485 [255]
487 486 $ echo fixed > baz
488 487 $ hg transplant --continue
489 488 9d6d6b5a8275 transplanted as d80c49962290
490 489 applying 1dab759070cf
491 490 1dab759070cf transplanted to aa0ffe6bd5ae
492 491
493 492 $ cd ..
494 493
495 494 Issue1111: Test transplant --merge
496 495
497 496 $ hg init t1111
498 497 $ cd t1111
499 498 $ echo a > a
500 499 $ hg ci -Am adda
501 500 adding a
502 501 $ echo b >> a
503 502 $ hg ci -m appendb
504 503 $ echo c >> a
505 504 $ hg ci -m appendc
506 505 $ hg up -C 0
507 506 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
508 507 $ echo d >> a
509 508 $ hg ci -m appendd
510 509 created new head
511 510
512 511 transplant
513 512
514 513 $ HGEDITOR="sh $TESTTMP/checkeditform.sh" hg transplant -m 1 -e
515 514 applying 42dc4432fd35
516 515 HGEDITFORM=transplant.merge
517 516 1:42dc4432fd35 merged at a9f4acbac129
518 517 $ hg update -q -C 2
519 518 $ cat > a <<EOF
520 519 > x
521 520 > y
522 521 > z
523 522 > EOF
524 523 $ hg commit -m replace
525 524 $ hg update -q -C 4
526 525 $ hg transplant -m 5
527 526 applying 600a3cdcb41d
528 527 patching file a
529 528 Hunk #1 FAILED at 0
530 529 1 out of 1 hunks FAILED -- saving rejects to file a.rej
531 530 patch failed to apply
532 531 abort: fix up the working directory and run hg transplant --continue
533 532 [255]
534 533 $ HGEDITOR="sh $TESTTMP/checkeditform.sh" hg transplant --continue -e
535 534 HGEDITFORM=transplant.merge
536 535 600a3cdcb41d transplanted as a3f88be652e0
537 536
538 537 $ cd ..
539 538
540 539 test transplant into empty repository
541 540
542 541 $ hg init empty
543 542 $ cd empty
544 543 $ hg transplant -s ../t -b tip -a
545 544 adding changesets
546 545 adding manifests
547 546 adding file changes
548 547 added 4 changesets with 4 changes to 4 files
549 548 new changesets 17ab29e464c6:a53251cdf717
550 549
551 550 test "--merge" causing pull from source repository on local host
552 551
553 552 $ hg --config extensions.mq= -q strip 2
554 553 $ hg transplant -s ../t --merge tip
555 554 searching for changes
556 555 searching for changes
557 556 adding changesets
558 557 adding manifests
559 558 adding file changes
560 559 added 2 changesets with 2 changes to 2 files
561 560 applying a53251cdf717
562 561 4:a53251cdf717 merged at 4831f4dc831a
563 new changesets 722f4667af76:4831f4dc831a
564 562
565 563 test interactive transplant
566 564
567 565 $ hg --config extensions.strip= -q strip 0
568 566 $ hg -R ../t log -G --template "{rev}:{node|short}"
569 567 @ 4:a53251cdf717
570 568 |
571 569 o 3:722f4667af76
572 570 |
573 571 o 2:37a1297eb21b
574 572 |
575 573 | o 1:d11e3596cc1a
576 574 |/
577 575 o 0:17ab29e464c6
578 576
579 577 $ hg transplant -q --config ui.interactive=true -s ../t <<EOF
580 578 > ?
581 579 > x
582 580 > q
583 581 > EOF
584 582 0:17ab29e464c6
585 583 apply changeset? [ynmpcq?]: ?
586 584 y: yes, transplant this changeset
587 585 n: no, skip this changeset
588 586 m: merge at this changeset
589 587 p: show patch
590 588 c: commit selected changesets
591 589 q: quit and cancel transplant
592 590 ?: ? (show this help)
593 591 apply changeset? [ynmpcq?]: x
594 592 unrecognized response
595 593 apply changeset? [ynmpcq?]: q
596 594 $ hg transplant -q --config ui.interactive=true -s ../t <<EOF
597 595 > p
598 596 > y
599 597 > n
600 598 > n
601 599 > m
602 600 > c
603 601 > EOF
604 602 0:17ab29e464c6
605 603 apply changeset? [ynmpcq?]: p
606 604 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
607 605 +++ b/r1 Thu Jan 01 00:00:00 1970 +0000
608 606 @@ -0,0 +1,1 @@
609 607 +r1
610 608 apply changeset? [ynmpcq?]: y
611 609 1:d11e3596cc1a
612 610 apply changeset? [ynmpcq?]: n
613 611 2:37a1297eb21b
614 612 apply changeset? [ynmpcq?]: n
615 613 3:722f4667af76
616 614 apply changeset? [ynmpcq?]: m
617 615 4:a53251cdf717
618 616 apply changeset? [ynmpcq?]: c
619 617 $ hg log -G --template "{node|short}"
620 618 @ 88be5dde5260
621 619 |\
622 620 | o 722f4667af76
623 621 | |
624 622 | o 37a1297eb21b
625 623 |/
626 624 o 17ab29e464c6
627 625
628 626 $ hg transplant -q --config ui.interactive=true -s ../t <<EOF
629 627 > x
630 628 > ?
631 629 > y
632 630 > q
633 631 > EOF
634 632 1:d11e3596cc1a
635 633 apply changeset? [ynmpcq?]: x
636 634 unrecognized response
637 635 apply changeset? [ynmpcq?]: ?
638 636 y: yes, transplant this changeset
639 637 n: no, skip this changeset
640 638 m: merge at this changeset
641 639 p: show patch
642 640 c: commit selected changesets
643 641 q: quit and cancel transplant
644 642 ?: ? (show this help)
645 643 apply changeset? [ynmpcq?]: y
646 644 4:a53251cdf717
647 645 apply changeset? [ynmpcq?]: q
648 646 $ hg heads --template "{node|short}\n"
649 647 88be5dde5260
650 648
651 649 $ cd ..
652 650
653 651
654 652 #if unix-permissions system-sh
655 653
656 654 test filter
657 655
658 656 $ hg init filter
659 657 $ cd filter
660 658 $ cat <<'EOF' >test-filter
661 659 > #!/bin/sh
662 660 > sed 's/r1/r2/' $1 > $1.new
663 661 > mv $1.new $1
664 662 > EOF
665 663 $ chmod +x test-filter
666 664 $ hg transplant -s ../t -b tip -a --filter ./test-filter
667 665 filtering * (glob)
668 666 applying 17ab29e464c6
669 667 17ab29e464c6 transplanted to e9ffc54ea104
670 668 filtering * (glob)
671 669 applying 37a1297eb21b
672 670 37a1297eb21b transplanted to 348b36d0b6a5
673 671 filtering * (glob)
674 672 applying 722f4667af76
675 673 722f4667af76 transplanted to 0aa6979afb95
676 674 filtering * (glob)
677 675 applying a53251cdf717
678 676 a53251cdf717 transplanted to 14f8512272b5
679 677 $ hg log --template '{rev} {parents} {desc}\n'
680 678 3 b3
681 679 2 b2
682 680 1 b1
683 681 0 r2
684 682 $ cd ..
685 683
686 684
687 685 test filter with failed patch
688 686
689 687 $ cd filter
690 688 $ hg up 0
691 689 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
692 690 $ echo foo > b1
693 691 $ hg ci -Am foo
694 692 adding b1
695 693 adding test-filter
696 694 created new head
697 695 $ hg transplant 1 --filter ./test-filter
698 696 filtering * (glob)
699 697 applying 348b36d0b6a5
700 698 file b1 already exists
701 699 1 out of 1 hunks FAILED -- saving rejects to file b1.rej
702 700 patch failed to apply
703 701 abort: fix up the working directory and run hg transplant --continue
704 702 [255]
705 703 $ cd ..
706 704
707 705 test environment passed to filter
708 706
709 707 $ hg init filter-environment
710 708 $ cd filter-environment
711 709 $ cat <<'EOF' >test-filter-environment
712 710 > #!/bin/sh
713 711 > echo "Transplant by $HGUSER" >> $1
714 712 > echo "Transplant from rev $HGREVISION" >> $1
715 713 > EOF
716 714 $ chmod +x test-filter-environment
717 715 $ hg transplant -s ../t --filter ./test-filter-environment 0
718 716 filtering * (glob)
719 717 applying 17ab29e464c6
720 718 17ab29e464c6 transplanted to 5190e68026a0
721 719
722 720 $ hg log --template '{rev} {parents} {desc}\n'
723 721 0 r1
724 722 Transplant by test
725 723 Transplant from rev 17ab29e464c6ca53e329470efe2a9918ac617a6f
726 724 $ cd ..
727 725
728 726 test transplant with filter handles invalid changelog
729 727
730 728 $ hg init filter-invalid-log
731 729 $ cd filter-invalid-log
732 730 $ cat <<'EOF' >test-filter-invalid-log
733 731 > #!/bin/sh
734 732 > echo "" > $1
735 733 > EOF
736 734 $ chmod +x test-filter-invalid-log
737 735 $ hg transplant -s ../t --filter ./test-filter-invalid-log 0
738 736 filtering * (glob)
739 737 abort: filter corrupted changeset (no user or date)
740 738 [255]
741 739 $ cd ..
742 740
743 741 #endif
744 742
745 743
746 744 test with a win32ext like setup (differing EOLs)
747 745
748 746 $ hg init twin1
749 747 $ cd twin1
750 748 $ echo a > a
751 749 $ echo b > b
752 750 $ echo b >> b
753 751 $ hg ci -Am t
754 752 adding a
755 753 adding b
756 754 $ echo a > b
757 755 $ echo b >> b
758 756 $ hg ci -m changeb
759 757 $ cd ..
760 758
761 759 $ hg init twin2
762 760 $ cd twin2
763 761 $ echo '[patch]' >> .hg/hgrc
764 762 $ echo 'eol = crlf' >> .hg/hgrc
765 763 $ $PYTHON -c "file('b', 'wb').write('b\r\nb\r\n')"
766 764 $ hg ci -Am addb
767 765 adding b
768 766 $ hg transplant -s ../twin1 tip
769 767 searching for changes
770 768 warning: repository is unrelated
771 769 applying 2e849d776c17
772 770 2e849d776c17 transplanted to 8e65bebc063e
773 771 $ cat b
774 772 a\r (esc)
775 773 b\r (esc)
776 774 $ cd ..
777 775
778 776 test transplant with merge changeset is skipped
779 777
780 778 $ hg init merge1a
781 779 $ cd merge1a
782 780 $ echo a > a
783 781 $ hg ci -Am a
784 782 adding a
785 783 $ hg branch b
786 784 marked working directory as branch b
787 785 (branches are permanent and global, did you want a bookmark?)
788 786 $ hg ci -m branchb
789 787 $ echo b > b
790 788 $ hg ci -Am b
791 789 adding b
792 790 $ hg update default
793 791 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
794 792 $ hg merge b
795 793 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
796 794 (branch merge, don't forget to commit)
797 795 $ hg ci -m mergeb
798 796 $ cd ..
799 797
800 798 $ hg init merge1b
801 799 $ cd merge1b
802 800 $ hg transplant -s ../merge1a tip
803 801 $ cd ..
804 802
805 803 test transplant with merge changeset accepts --parent
806 804
807 805 $ hg init merge2a
808 806 $ cd merge2a
809 807 $ echo a > a
810 808 $ hg ci -Am a
811 809 adding a
812 810 $ hg branch b
813 811 marked working directory as branch b
814 812 (branches are permanent and global, did you want a bookmark?)
815 813 $ hg ci -m branchb
816 814 $ echo b > b
817 815 $ hg ci -Am b
818 816 adding b
819 817 $ hg update default
820 818 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
821 819 $ hg merge b
822 820 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
823 821 (branch merge, don't forget to commit)
824 822 $ hg ci -m mergeb
825 823 $ cd ..
826 824
827 825 $ hg init merge2b
828 826 $ cd merge2b
829 827 $ hg transplant -s ../merge2a --parent tip tip
830 828 abort: be9f9b39483f is not a parent of be9f9b39483f
831 829 [255]
832 830 $ hg transplant -s ../merge2a --parent 0 tip
833 831 applying be9f9b39483f
834 832 be9f9b39483f transplanted to 9959e51f94d1
835 833 $ cd ..
836 834
837 835 test transplanting a patch turning into a no-op
838 836
839 837 $ hg init binarysource
840 838 $ cd binarysource
841 839 $ echo a > a
842 840 $ hg ci -Am adda a
843 841 >>> file('b', 'wb').write('\0b1')
844 842 $ hg ci -Am addb b
845 843 >>> file('b', 'wb').write('\0b2')
846 844 $ hg ci -m changeb b
847 845 $ cd ..
848 846
849 847 $ hg clone -r0 binarysource binarydest
850 848 adding changesets
851 849 adding manifests
852 850 adding file changes
853 851 added 1 changesets with 1 changes to 1 files
854 852 new changesets 07f494440405
855 853 updating to branch default
856 854 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
857 855 $ cd binarydest
858 856 $ cp ../binarysource/b b
859 857 $ hg ci -Am addb2 b
860 858 $ hg transplant -s ../binarysource 2
861 859 searching for changes
862 860 applying 7a7d57e15850
863 861 skipping emptied changeset 7a7d57e15850
864 862
865 863 Test empty result in --continue
866 864
867 865 $ hg transplant -s ../binarysource 1
868 866 searching for changes
869 867 applying 645035761929
870 868 file b already exists
871 869 1 out of 1 hunks FAILED -- saving rejects to file b.rej
872 870 patch failed to apply
873 871 abort: fix up the working directory and run hg transplant --continue
874 872 [255]
875 873 $ hg status
876 874 ? b.rej
877 875 $ hg transplant --continue
878 876 645035761929 skipped due to empty diff
879 877
880 878 $ cd ..
881 879
882 880 Explicitly kill daemons to let the test exit on Windows
883 881
884 882 $ killdaemons.py
885 883
886 884 Test that patched files are treated as "modified" when transplant is
887 885 aborted by a patching failure, even if none of their mode, size or
888 886 timestamp is changed on the filesystem (see also issue4583)
889 887
890 888 $ cd t
891 889
892 890 $ cat > $TESTTMP/abort.py <<EOF
893 891 > # emulate that patch.patch() is aborted at patching on "abort" file
894 892 > from mercurial import error, extensions, patch as patchmod
895 893 > def patch(orig, ui, repo, patchname,
896 894 > strip=1, prefix='', files=None,
897 895 > eolmode='strict', similarity=0):
898 896 > if files is None:
899 897 > files = set()
900 898 > r = orig(ui, repo, patchname,
901 899 > strip=strip, prefix=prefix, files=files,
902 900 > eolmode=eolmode, similarity=similarity)
903 901 > if 'abort' in files:
904 902 > raise error.PatchError('intentional error while patching')
905 903 > return r
906 904 > def extsetup(ui):
907 905 > extensions.wrapfunction(patchmod, 'patch', patch)
908 906 > EOF
909 907
910 908 $ echo X1 > r1
911 909 $ hg diff --nodates r1
912 910 diff -r a53251cdf717 r1
913 911 --- a/r1
914 912 +++ b/r1
915 913 @@ -1,1 +1,1 @@
916 914 -r1
917 915 +X1
918 916 $ hg commit -m "X1 as r1"
919 917
920 918 $ echo 'marking to abort patching' > abort
921 919 $ hg add abort
922 920 $ echo Y1 > r1
923 921 $ hg diff --nodates r1
924 922 diff -r 22c515968f13 r1
925 923 --- a/r1
926 924 +++ b/r1
927 925 @@ -1,1 +1,1 @@
928 926 -X1
929 927 +Y1
930 928 $ hg commit -m "Y1 as r1"
931 929
932 930 $ hg update -q -C d11e3596cc1a
933 931 $ cat r1
934 932 r1
935 933
936 934 $ cat >> .hg/hgrc <<EOF
937 935 > [fakedirstatewritetime]
938 936 > # emulate invoking dirstate.write() via repo.status() or markcommitted()
939 937 > # at 2000-01-01 00:00
940 938 > fakenow = 200001010000
941 939 >
942 940 > # emulate invoking patch.internalpatch() at 2000-01-01 00:00
943 941 > [fakepatchtime]
944 942 > fakenow = 200001010000
945 943 >
946 944 > [extensions]
947 945 > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py
948 946 > fakepatchtime = $TESTDIR/fakepatchtime.py
949 947 > abort = $TESTTMP/abort.py
950 948 > EOF
951 949 $ hg transplant "22c515968f13::"
952 950 applying 22c515968f13
953 951 22c515968f13 transplanted to * (glob)
954 952 applying e38700ba9dd3
955 953 intentional error while patching
956 954 abort: fix up the working directory and run hg transplant --continue
957 955 [255]
958 956 $ cat >> .hg/hgrc <<EOF
959 957 > [hooks]
960 958 > fakedirstatewritetime = !
961 959 > fakepatchtime = !
962 960 > [extensions]
963 961 > abort = !
964 962 > EOF
965 963
966 964 $ cat r1
967 965 Y1
968 966 $ hg debugstate | grep ' r1$'
969 967 n 644 3 unset r1
970 968 $ hg status -A r1
971 969 M r1
972 970
973 971 Test that rollback by unexpected failure after transplanting the first
974 972 revision restores dirstate correctly.
975 973
976 974 $ hg rollback -q
977 975 $ rm -f abort
978 976 $ hg update -q -C d11e3596cc1a
979 977 $ hg parents -T "{node|short}\n"
980 978 d11e3596cc1a
981 979 $ hg status -A
982 980 C r1
983 981 C r2
984 982
985 983 $ cat >> .hg/hgrc <<EOF
986 984 > [hooks]
987 985 > # emulate failure at transplanting the 2nd revision
988 986 > pretxncommit.abort = test ! -f abort
989 987 > EOF
990 988 $ hg transplant "22c515968f13::"
991 989 applying 22c515968f13
992 990 22c515968f13 transplanted to * (glob)
993 991 applying e38700ba9dd3
994 992 transaction abort!
995 993 rollback completed
996 994 abort: pretxncommit.abort hook exited with status 1
997 995 [255]
998 996 $ cat >> .hg/hgrc <<EOF
999 997 > [hooks]
1000 998 > pretxncommit.abort = !
1001 999 > EOF
1002 1000
1003 1001 $ hg parents -T "{node|short}\n"
1004 1002 d11e3596cc1a
1005 1003 $ hg status -A
1006 1004 M r1
1007 1005 ? abort
1008 1006 C r2
1009 1007
1010 1008 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now