##// END OF EJS Templates
merge: introduce a merge() for that use-case...
Martin von Zweigbergk -
r44883:a45ffad9 default
parent child Browse files
Show More
@@ -1,1449 +1,1442 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import shutil
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from .pycompat import getattr
19 19
20 20 from . import (
21 21 bookmarks,
22 22 bundlerepo,
23 23 cacheutil,
24 24 cmdutil,
25 25 destutil,
26 26 discovery,
27 27 error,
28 28 exchange,
29 29 extensions,
30 30 httppeer,
31 31 localrepo,
32 32 lock,
33 33 logcmdutil,
34 34 logexchange,
35 35 merge as mergemod,
36 36 narrowspec,
37 37 node,
38 38 phases,
39 39 pycompat,
40 40 scmutil,
41 41 sshpeer,
42 42 statichttprepo,
43 43 ui as uimod,
44 44 unionrepo,
45 45 url,
46 46 util,
47 47 verify as verifymod,
48 48 vfs as vfsmod,
49 49 )
50 50 from .utils import hashutil
51 51 from .interfaces import repository as repositorymod
52 52
# convenience alias used by cleanup paths below (copystore(), clone())
# to release one or more store locks
release = lock.release

# shared features
# identifier added to a share's shared-items set when bookmarks are
# shared between the repositories (see share())
sharedbookmarks = b'bookmarks'

58 58
def _local(path):
    """Return the repo module that handles a local *path*.

    A path that names an existing file is treated as a bundle
    (``bundlerepo``); anything else is a regular ``localrepo``.
    """
    localized = util.expandpath(util.urllocalpath(path))

    try:
        # Python 2 raises TypeError, Python 3 ValueError.
        file_like = os.path.isfile(localized)
    except (TypeError, ValueError) as err:
        raise error.Abort(
            _(b'invalid path %s: %s') % (localized, pycompat.bytestr(err))
        )

    if file_like:
        return bundlerepo
    return localrepo
71 71
72 72
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names from a url fragment into revisions.

    ``branches`` is a ``(hashbranch, branches)`` pair, as produced by
    parseurl().  Returns a ``(revs, checkout)`` pair where ``revs`` is the
    possibly-extended revision list and ``checkout`` is the revision the
    working directory should be updated to (or None).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch information in the URL: pass the caller's revs through
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # old server without branchmap support: named branches cannot be
        # resolved; the URL fragment is passed along as an opaque revision
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # expand a branch name to its heads via the remote branchmap;
        # b'.' means the local dirstate branch
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # the fragment is not a branch name; treat it as a raw revision
            revs.append(hashbranch)
    return revs, revs[0]
115 115
116 116
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    parsed = util.url(path)
    fragment = parsed.fragment or None
    if fragment is not None:
        # drop the fragment so it does not leak into the returned URL
        parsed.fragment = None
    return bytes(parsed), (fragment, branches or [])
126 126
127 127
# map of URL scheme -> module (or factory) providing the peer/repo class
# for that scheme; consulted by _peerlookup(), with b'file' as fallback
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
137 137
138 138
def _peerlookup(path):
    """Return the module or object from ``schemes`` that handles *path*."""
    parsed = util.url(path)
    handler = schemes.get(parsed.scheme or b'file') or schemes[b'file']
    try:
        # the scheme entry may itself be a factory (e.g. _local) that must
        # be called with the path to pick the real module
        return handler(path)
    except TypeError:
        # we can't test callable(handler) because it can be an unloaded
        # module that implements __call__
        if util.safehasattr(handler, b'instance'):
            return handler
        raise
151 151
152 152
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # already a repo/peer object: ask it directly
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # scheme handler has no islocal(): treat as remote
        return False
161 161
162 162
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
170 170
171 171
# a list of (ui, repo) functions called for wire peer initialization
# (invoked by _peerorrepo() on every non-local peer it returns)
wirepeersetupfuncs = []
174 174
175 175
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # the instance may carry its own (repo-specific) ui; prefer it for
    # the setup hooks below
    ui = getattr(obj, "ui", ui)
    # caller-supplied setup functions run before extension reposetup hooks
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b'  > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get extra initialization from the registered hooks
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
202 202
203 203
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = obj.local()
    if repo:
        # hand back the view that hides hidden changesets
        return repo.filtered(b'visible')
    raise error.Abort(
        _(b"repository '%s' is not local") % (path or obj.url())
    )
227 227
228 228
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
235 235
236 236
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    urlpath = util.url(source).path
    if not urlpath:
        return b''
    return os.path.basename(os.path.normpath(urlpath))
257 257
258 258
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # store lives inside the repo itself: not a share
        return None

    cached = util.safehasattr(repo, b'srcrepo') and repo.srcrepo
    if cached:
        return cached

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    # memoize for subsequent lookups
    repo.srcrepo = srcrepo
    return srcrepo
276 276
277 277
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        # source given as a path/URL: open it and resolve any #branch part
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # source is already a repo/peer object
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen so configuration written by postshare() (e.g. hgrc) is loaded
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
325 325
326 326
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            # keep the old share pointer around (renamed) instead of
            # deleting it, then drop the share requirements
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(b'shared')
            repo.requirements.discard(b'relshared')
            repo._writerequirements()

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
368 368
369 369
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # point the share's default path at the original clone source
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow shares also need the narrowspec copied into the working copy
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
386 386
387 387
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the requested checkout first, then fall back to the default
    # branch head, then to tip
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
408 408
409 409
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        # hardlink starts undecided (None); each util.copyfiles() call
        # returns whether hardlinking was actually used and we feed that
        # back in for the following files.
        # NOTE(review): topic is computed while hardlink is still None,
        # so the progress topic is always b'copying' — confirm intended.
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # publishing repos do not copy phase information
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except: # re-raises
        # make sure the target lock is not left behind on failure
        release(destlock)
        raise
448 448
449 449
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    # resolve symbolic revisions to nodes on the remote up front
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(b'lookup', {b'key': r,}).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
551 551
552 552
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src = srcrepo.vfs.join(b'cache/%s' % fname)
    if not os.path.exists(src):
        # nothing cached in the source: nothing to do
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
563 563
564 564
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        # source given as a path/URL: open a peer and resolve any #branch
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_(b"destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_(b"empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_(b"destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_(b"destination '%s' is not empty") % dest)

    createopts = {}
    narrow = False

    if storeincludepats is not None:
        narrowspec.validatepatterns(storeincludepats)
        narrow = True

    if storeexcludepats is not None:
        narrowspec.validatepatterns(storeexcludepats)
        narrow = True

    if narrow:
        # Include everything by default if only exclusion patterns defined.
        if storeexcludepats and not storeincludepats:
            storeincludepats = {b'path:.'}

        createopts[b'narrowfiles'] = True

    if depth:
        createopts[b'shallowfilestore'] = True

    if srcpeer.capable(b'lfs-serve'):
        # Repository creation honors the config if it disabled the extension, so
        # we can't just announce that lfs will be enabled. This check avoids
        # saying that lfs will be enabled, and then saying it's an unknown
        # feature. The lfs creation option is set in either case so that a
        # requirement is added. If the extension is explicitly disabled but the
        # requirement is set, the clone aborts early, before transferring any
        # data.
        createopts[b'lfs'] = True

        if extensions.disabledext(b'lfs'):
            ui.status(
                _(
                    b'(remote is using large file support (lfs), but it is '
                    b'explicitly disabled in the local configuration)\n'
                )
            )
        else:
            ui.status(
                _(
                    b'(remote is using large file support (lfs); lfs will '
                    b'be enabled for this repository)\n'
                )
            )

    shareopts = shareopts or {}
    sharepool = shareopts.get(b'pool')
    sharenamemode = shareopts.get(b'mode')
    if sharepool and islocal(dest):
        # pooled-storage mode: compute where the shared store lives and
        # delegate to clonewithshare() if a usable path was determined
        sharepath = None
        if sharenamemode == b'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand(
                        b'lookup', {b'key': b'0',}
                    ).result()

                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'remote appears to be empty)\n'
                        )
                    )
            except error.RepoLookupError:
                ui.status(
                    _(
                        b'(not using pooled storage: '
                        b'unable to resolve identity of remote)\n'
                    )
                )
        elif sharenamemode == b'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashutil.sha1(source).digest())
            )
        else:
            raise error.Abort(
                _(b'unknown share naming mode: %s') % sharenamemode
            )

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
            sharepath = None

        if sharepath:
            return clonewithshare(
                ui,
                peeropts,
                sharepath,
                source,
                srcpeer,
                dest,
                pull=pull,
                rev=revs,
                update=update,
                stream=stream,
            )

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            # cleandir is removed on failure; start with the whole dest
            cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            # fast path: copy (or hardlink) the store files directly
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(
                b'outgoing', source=b'clone', node=node.hex(node.nullid)
            )
        else:
            # slow path: create an empty destination and exchange data
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(b'lookup', {b'key': rev,}).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        # success: nothing to clean up anymore
        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup', {b'key': update,}
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    # fall back to the @ bookmark, then the default branch
                    # head, then tip
                    try:
                        uprev = destrepo._bookmarks[b'@']
                        update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            status = _(b"updating to bookmark @\n")
                        else:
                            status = (
                                _(b"updating to bookmark @ on branch %s\n") % bn
                            )
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
992 992
993 993
def _showstats(repo, stats, quietempty=False):
    """print update/merge statistics to the ui (unless empty and quiet)"""
    if quietempty and stats.isempty():
        return
    message = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    repo.ui.status(message % counts)
1009 1009
1010 1010
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # not a branch merge: plain working-directory update
    labels = [b'working copy', b'destination']
    return mergemod.update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1025 1025
1026 1026
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    # truthy return signals that conflicts remain
    return unresolved > 0
1034 1034
1035 1035
# naming conflict in clone(): a local variable there shadows the
# module-level update() above, so keep an aliased name for its use
_update = update
1038 1038
1039 1039
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    # an overwriting update can never leave unresolved conflicts behind
    assert stats.unresolvedcount == 0
    if not show_stats:
        return
    _showstats(repo, stats, quietempty)
1046 1046
1047 1047
# naming conflict in updatetotally(): its 'clean' argument shadows the
# module-level clean() above, so keep an aliased name for its use
_clean = clean
1050 1050
# the set of values accepted for the 'updatecheck' argument of
# updatetotally(); see that function's docstring for their meaning
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1057 1057
1058 1058
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # an invalid config value is silently replaced above, but an invalid
    # value passed in explicitly by the caller is a programming error
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil choose one, possibly
            # along with a bookmark to move and a name to (re)activate
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out on a dirty working directory here; the update
                # itself then needs no further dirtiness checking
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is not a bookmark: any active bookmark is left
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1137 1137
1138 1138
def merge(
    repo, node, force=False, remind=True, labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    stats = mergemod.merge(repo[node], force=force, labels=labels)
    _showstats(repo, stats)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved
1163 1156
1164 1157
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the pre-merge parent."""
    ms = mergemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean (overwriting) update cannot leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1178 1171
1179 1172
def _incoming(
    displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed, otherwise
    the result of subreporecurse().
    """
    source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
    other = peer(repo, opts, source)
    ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    # ``other`` may be replaced here by a repo holding the incoming
    # changesets; cleanupfn undoes whatever temporary state was created
    other, chlist, cleanupfn = bundlerepo.getremotechanges(
        ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
    )
    try:
        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        # always run, even if display raised, so temporary state is freed
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1213 1206
1214 1207
def incoming(ui, repo, source, opts):
    """Show changesets in *source* not present locally; return exit code."""

    def subreporecurse():
        ret = 1
        if not opts.get(b'subrepos'):
            return ret
        wctx = repo[None]
        for subpath in sorted(wctx.substate):
            ret = min(ret, wctx.sub(subpath).incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skipmerges = opts.get(b'no_merges')
        shown = 0
        for rev in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(rev) if p != nullid]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[rev])

    return _incoming(display, subreporecurse, ui, repo, source, opts)
1240 1233
1241 1234
def _outgoing(ui, repo, dest, opts):
    """Compute changesets missing from *dest*.

    Returns a pair ``(missing, other)`` where ``missing`` is the (possibly
    empty) list of outgoing changeset nodes and ``other`` is the peer for
    the destination repository.

    Raises Abort when no destination is given and none is configured.
    """
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            _(b'default repository not configured!'),
            hint=_(b"see 'hg help config.paths'"),
        )
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get(b'branch') or []

    ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(
        repo, other, revs, force=opts.get(b'force')
    )
    o = outgoing.missing
    if not o:
        # tell the user why nothing is outgoing (e.g. excluded secret csets)
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other
1265 1258
1266 1259
def outgoing(ui, repo, dest, opts):
    """Show changesets not found in *dest*; return 0 when some were shown."""

    def recurse():
        ret = 1
        if not opts.get(b'subrepos'):
            return ret
        wctx = repo[None]
        for subpath in sorted(wctx.substate):
            ret = min(ret, wctx.sub(subpath).outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get(b'newest_first'):
        o.reverse()
    ui.pager(b'outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    skipmerges = opts.get(b'no_merges')
    shown = 0
    for n in o:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if skipmerges and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0  # exit code is zero since we found outgoing changes
1300 1293
1301 1294
def verify(repo, level=None):
    """verify the consistency of a repository

    Runs verifymod.verify() and then additionally checks subrepo links
    referenced from .hgsubstate files; any subrepo failure is folded into
    the returned result.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # keep any earlier failure if this subrepo is clean
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % node.short(ctx.node())
                )

    return ret
1334 1327
1335 1328
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # src looks like a repository: start from its repo-independent
        # config, then read per-repo overrides from src.ui below
        remote = src.baseui.copy()
        src = src.ui
    else:
        # src is a plain ui object: keep all of its global options
        remote = src.copy()

    # ssh-specific options; command-line values win over configuration
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            remote.setconfig(b"ui", key, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        remote.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # selected local settings that matter when talking to a remote
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            remote.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        remote.setconfig(
            b'web', b'cacerts', util.expandpath(cacerts), b'copied'
        )

    return remote
1364 1357
1365 1358
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (name of a repo attribute holding a directory,
# filename relative to that directory); consumed by
# cachedlocalrepo._repostate().
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1375 1368
1376 1369
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) pairs for the files listed in ``foi``
        # plus the newest mtime seen; fetch() compares against it to
        # detect staleness
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return (tuple of (mtime, size) per file of interest, max mtime)."""
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # the file may not exist (e.g. no bookmarks yet); fall back
                # to the containing directory so changes are still noticed
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the staleness snapshot so the copy does not consider
        # itself fresher than the original
        c._state = self._state
        c.mtime = self.mtime
        return c
@@ -1,2770 +1,2787 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import shutil
12 12 import stat
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullhex,
22 22 nullid,
23 23 nullrev,
24 24 )
25 25 from .pycompat import delattr
26 26 from .thirdparty import attr
27 27 from . import (
28 28 copies,
29 29 encoding,
30 30 error,
31 31 filemerge,
32 32 match as matchmod,
33 33 obsutil,
34 34 pathutil,
35 35 pycompat,
36 36 scmutil,
37 37 subrepoutil,
38 38 util,
39 39 worker,
40 40 )
41 41 from .utils import hashutil
42 42
# shorthands for the struct helpers used by the v2 on-disk record format
_pack = struct.pack
_unpack = struct.unpack
45 45
46 46
47 47 def _droponode(data):
48 48 # used for compatibility for v1
49 49 bits = data.split(b'\0')
50 50 bits = bits[:-2] + bits[-1:]
51 51 return b'\0'.join(bits)
52 52
53 53
# Merge state record types. See ``mergestate`` docs for more.
RECORD_LOCAL = b'L'
RECORD_OTHER = b'O'
RECORD_MERGED = b'F'
RECORD_CHANGEDELETE_CONFLICT = b'C'
RECORD_MERGE_DRIVER_MERGE = b'D'
RECORD_PATH_CONFLICT = b'P'
RECORD_MERGE_DRIVER_STATE = b'm'
RECORD_FILE_VALUES = b'f'
RECORD_LABELS = b'l'
RECORD_OVERRIDE = b't'
RECORD_UNSUPPORTED_MANDATORY = b'X'
RECORD_UNSUPPORTED_ADVISORY = b'x'

# Merge driver run states, stored in the 'm' record (experimental);
# see the ``mergestate`` docstring for their meaning
MERGE_DRIVER_STATE_UNMARKED = b'u'
MERGE_DRIVER_STATE_MARKED = b'm'
MERGE_DRIVER_STATE_SUCCESS = b's'

# Per-file resolution states, stored in ``mergestate._state``;
# see the ``mergestate`` docstring for their meaning
MERGE_RECORD_UNRESOLVED = b'u'
MERGE_RECORD_RESOLVED = b'r'
MERGE_RECORD_UNRESOLVED_PATH = b'pu'
MERGE_RECORD_RESOLVED_PATH = b'pr'
MERGE_RECORD_DRIVER_RESOLVED = b'd'

# Codes for the per-file actions to apply during an update/merge
ACTION_FORGET = b'f'
ACTION_REMOVE = b'r'
ACTION_ADD = b'a'
ACTION_GET = b'g'
ACTION_PATH_CONFLICT = b'p'
ACTION_PATH_CONFLICT_RESOLVE = b'pr'
ACTION_ADD_MODIFIED = b'am'
ACTION_CREATED = b'c'
ACTION_DELETED_CHANGED = b'dc'
ACTION_CHANGED_DELETED = b'cd'
ACTION_MERGE = b'm'
ACTION_LOCAL_DIR_RENAME_GET = b'dg'
ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
ACTION_KEEP = b'k'
ACTION_EXEC = b'e'
ACTION_CREATED_MERGE = b'cm'
94 94
95 95
96 96 class mergestate(object):
97 97 '''track 3-way merge state of individual files
98 98
99 99 The merge state is stored on disk when needed. Two files are used: one with
100 100 an old format (version 1), and one with a new format (version 2). Version 2
101 101 stores a superset of the data in version 1, including new kinds of records
102 102 in the future. For more about the new format, see the documentation for
103 103 `_readrecordsv2`.
104 104
105 105 Each record can contain arbitrary content, and has an associated type. This
106 106 `type` should be a letter. If `type` is uppercase, the record is mandatory:
107 107 versions of Mercurial that don't support it should abort. If `type` is
108 108 lowercase, the record can be safely ignored.
109 109
110 110 Currently known records:
111 111
112 112 L: the node of the "local" part of the merge (hexified version)
113 113 O: the node of the "other" part of the merge (hexified version)
114 114 F: a file to be merged entry
115 115 C: a change/delete or delete/change conflict
116 116 D: a file that the external merge driver will merge internally
117 117 (experimental)
118 118 P: a path conflict (file vs directory)
119 119 m: the external merge driver defined for this merge plus its run state
120 120 (experimental)
121 121 f: a (filename, dictionary) tuple of optional values for a given file
122 122 X: unsupported mandatory record type (used in tests)
123 123 x: unsupported advisory record type (used in tests)
124 124 l: the labels for the parts of the merge.
125 125
126 126 Merge driver run states (experimental):
127 127 u: driver-resolved files unmarked -- needs to be run next time we're about
128 128 to resolve or commit
129 129 m: driver-resolved files marked -- only needs to be run before commit
130 130 s: success/skipped -- does not need to be run any more
131 131
132 132 Merge record states (stored in self._state, indexed by filename):
133 133 u: unresolved conflict
134 134 r: resolved conflict
135 135 pu: unresolved path conflict (file conflicts with directory)
136 136 pr: resolved path conflict
137 137 d: driver-resolved conflict
138 138
139 139 The resolve command transitions between 'u' and 'r' for conflicts and
140 140 'pu' and 'pr' for path conflicts.
141 141 '''
142 142
143 143 statepathv1 = b'merge/state'
144 144 statepathv2 = b'merge/state2'
145 145
146 146 @staticmethod
147 147 def clean(repo, node=None, other=None, labels=None):
148 148 """Initialize a brand new merge state, removing any existing state on
149 149 disk."""
150 150 ms = mergestate(repo)
151 151 ms.reset(node, other, labels)
152 152 return ms
153 153
154 154 @staticmethod
155 155 def read(repo):
156 156 """Initialize the merge state, reading it from disk."""
157 157 ms = mergestate(repo)
158 158 ms._read()
159 159 return ms
160 160
161 161 def __init__(self, repo):
162 162 """Initialize the merge state.
163 163
164 164 Do not use this directly! Instead call read() or clean()."""
165 165 self._repo = repo
166 166 self._dirty = False
167 167 self._labels = None
168 168
169 169 def reset(self, node=None, other=None, labels=None):
170 170 self._state = {}
171 171 self._stateextras = {}
172 172 self._local = None
173 173 self._other = None
174 174 self._labels = labels
175 175 for var in ('localctx', 'otherctx'):
176 176 if var in vars(self):
177 177 delattr(self, var)
178 178 if node:
179 179 self._local = node
180 180 self._other = other
181 181 self._readmergedriver = None
182 182 if self.mergedriver:
183 183 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
184 184 else:
185 185 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
186 186 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
187 187 self._results = {}
188 188 self._dirty = False
189 189
190 190 def _read(self):
191 191 """Analyse each record content to restore a serialized state from disk
192 192
193 193 This function process "record" entry produced by the de-serialization
194 194 of on disk file.
195 195 """
196 196 self._state = {}
197 197 self._stateextras = {}
198 198 self._local = None
199 199 self._other = None
200 200 for var in ('localctx', 'otherctx'):
201 201 if var in vars(self):
202 202 delattr(self, var)
203 203 self._readmergedriver = None
204 204 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
205 205 unsupported = set()
206 206 records = self._readrecords()
207 207 for rtype, record in records:
208 208 if rtype == RECORD_LOCAL:
209 209 self._local = bin(record)
210 210 elif rtype == RECORD_OTHER:
211 211 self._other = bin(record)
212 212 elif rtype == RECORD_MERGE_DRIVER_STATE:
213 213 bits = record.split(b'\0', 1)
214 214 mdstate = bits[1]
215 215 if len(mdstate) != 1 or mdstate not in (
216 216 MERGE_DRIVER_STATE_UNMARKED,
217 217 MERGE_DRIVER_STATE_MARKED,
218 218 MERGE_DRIVER_STATE_SUCCESS,
219 219 ):
220 220 # the merge driver should be idempotent, so just rerun it
221 221 mdstate = MERGE_DRIVER_STATE_UNMARKED
222 222
223 223 self._readmergedriver = bits[0]
224 224 self._mdstate = mdstate
225 225 elif rtype in (
226 226 RECORD_MERGED,
227 227 RECORD_CHANGEDELETE_CONFLICT,
228 228 RECORD_PATH_CONFLICT,
229 229 RECORD_MERGE_DRIVER_MERGE,
230 230 ):
231 231 bits = record.split(b'\0')
232 232 self._state[bits[0]] = bits[1:]
233 233 elif rtype == RECORD_FILE_VALUES:
234 234 filename, rawextras = record.split(b'\0', 1)
235 235 extraparts = rawextras.split(b'\0')
236 236 extras = {}
237 237 i = 0
238 238 while i < len(extraparts):
239 239 extras[extraparts[i]] = extraparts[i + 1]
240 240 i += 2
241 241
242 242 self._stateextras[filename] = extras
243 243 elif rtype == RECORD_LABELS:
244 244 labels = record.split(b'\0', 2)
245 245 self._labels = [l for l in labels if len(l) > 0]
246 246 elif not rtype.islower():
247 247 unsupported.add(rtype)
248 248 self._results = {}
249 249 self._dirty = False
250 250
251 251 if unsupported:
252 252 raise error.UnsupportedMergeRecords(unsupported)
253 253
254 254 def _readrecords(self):
255 255 """Read merge state from disk and return a list of record (TYPE, data)
256 256
257 257 We read data from both v1 and v2 files and decide which one to use.
258 258
259 259 V1 has been used by version prior to 2.9.1 and contains less data than
260 260 v2. We read both versions and check if no data in v2 contradicts
261 261 v1. If there is not contradiction we can safely assume that both v1
262 262 and v2 were written at the same time and use the extract data in v2. If
263 263 there is contradiction we ignore v2 content as we assume an old version
264 264 of Mercurial has overwritten the mergestate file and left an old v2
265 265 file around.
266 266
267 267 returns list of record [(TYPE, data), ...]"""
268 268 v1records = self._readrecordsv1()
269 269 v2records = self._readrecordsv2()
270 270 if self._v1v2match(v1records, v2records):
271 271 return v2records
272 272 else:
273 273 # v1 file is newer than v2 file, use it
274 274 # we have to infer the "other" changeset of the merge
275 275 # we cannot do better than that with v1 of the format
276 276 mctx = self._repo[None].parents()[-1]
277 277 v1records.append((RECORD_OTHER, mctx.hex()))
278 278 # add place holder "other" file node information
279 279 # nobody is using it yet so we do no need to fetch the data
280 280 # if mctx was wrong `mctx[bits[-2]]` may fails.
281 281 for idx, r in enumerate(v1records):
282 282 if r[0] == RECORD_MERGED:
283 283 bits = r[1].split(b'\0')
284 284 bits.insert(-2, b'')
285 285 v1records[idx] = (r[0], b'\0'.join(bits))
286 286 return v1records
287 287
288 288 def _v1v2match(self, v1records, v2records):
289 289 oldv2 = set() # old format version of v2 record
290 290 for rec in v2records:
291 291 if rec[0] == RECORD_LOCAL:
292 292 oldv2.add(rec)
293 293 elif rec[0] == RECORD_MERGED:
294 294 # drop the onode data (not contained in v1)
295 295 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
296 296 for rec in v1records:
297 297 if rec not in oldv2:
298 298 return False
299 299 else:
300 300 return True
301 301
302 302 def _readrecordsv1(self):
303 303 """read on disk merge state for version 1 file
304 304
305 305 returns list of record [(TYPE, data), ...]
306 306
307 307 Note: the "F" data from this file are one entry short
308 308 (no "other file node" entry)
309 309 """
310 310 records = []
311 311 try:
312 312 f = self._repo.vfs(self.statepathv1)
313 313 for i, l in enumerate(f):
314 314 if i == 0:
315 315 records.append((RECORD_LOCAL, l[:-1]))
316 316 else:
317 317 records.append((RECORD_MERGED, l[:-1]))
318 318 f.close()
319 319 except IOError as err:
320 320 if err.errno != errno.ENOENT:
321 321 raise
322 322 return records
323 323
324 324 def _readrecordsv2(self):
325 325 """read on disk merge state for version 2 file
326 326
327 327 This format is a list of arbitrary records of the form:
328 328
329 329 [type][length][content]
330 330
331 331 `type` is a single character, `length` is a 4 byte integer, and
332 332 `content` is an arbitrary byte sequence of length `length`.
333 333
334 334 Mercurial versions prior to 3.7 have a bug where if there are
335 335 unsupported mandatory merge records, attempting to clear out the merge
336 336 state with hg update --clean or similar aborts. The 't' record type
337 337 works around that by writing out what those versions treat as an
338 338 advisory record, but later versions interpret as special: the first
339 339 character is the 'real' record type and everything onwards is the data.
340 340
341 341 Returns list of records [(TYPE, data), ...]."""
342 342 records = []
343 343 try:
344 344 f = self._repo.vfs(self.statepathv2)
345 345 data = f.read()
346 346 off = 0
347 347 end = len(data)
348 348 while off < end:
349 349 rtype = data[off : off + 1]
350 350 off += 1
351 351 length = _unpack(b'>I', data[off : (off + 4)])[0]
352 352 off += 4
353 353 record = data[off : (off + length)]
354 354 off += length
355 355 if rtype == RECORD_OVERRIDE:
356 356 rtype, record = record[0:1], record[1:]
357 357 records.append((rtype, record))
358 358 f.close()
359 359 except IOError as err:
360 360 if err.errno != errno.ENOENT:
361 361 raise
362 362 return records
363 363
364 364 @util.propertycache
365 365 def mergedriver(self):
366 366 # protect against the following:
367 367 # - A configures a malicious merge driver in their hgrc, then
368 368 # pauses the merge
369 369 # - A edits their hgrc to remove references to the merge driver
370 370 # - A gives a copy of their entire repo, including .hg, to B
371 371 # - B inspects .hgrc and finds it to be clean
372 372 # - B then continues the merge and the malicious merge driver
373 373 # gets invoked
374 374 configmergedriver = self._repo.ui.config(
375 375 b'experimental', b'mergedriver'
376 376 )
377 377 if (
378 378 self._readmergedriver is not None
379 379 and self._readmergedriver != configmergedriver
380 380 ):
381 381 raise error.ConfigError(
382 382 _(b"merge driver changed since merge started"),
383 383 hint=_(b"revert merge driver change or abort merge"),
384 384 )
385 385
386 386 return configmergedriver
387 387
388 388 @util.propertycache
389 389 def local(self):
390 390 if self._local is None:
391 391 msg = b"local accessed but self._local isn't set"
392 392 raise error.ProgrammingError(msg)
393 393 return self._local
394 394
395 395 @util.propertycache
396 396 def localctx(self):
397 397 return self._repo[self.local]
398 398
399 399 @util.propertycache
400 400 def other(self):
401 401 if self._other is None:
402 402 msg = b"other accessed but self._other isn't set"
403 403 raise error.ProgrammingError(msg)
404 404 return self._other
405 405
406 406 @util.propertycache
407 407 def otherctx(self):
408 408 return self._repo[self.other]
409 409
410 410 def active(self):
411 411 """Whether mergestate is active.
412 412
413 413 Returns True if there appears to be mergestate. This is a rough proxy
414 414 for "is a merge in progress."
415 415 """
416 416 return bool(self._local) or bool(self._state)
417 417
418 418 def commit(self):
419 419 """Write current state on disk (if necessary)"""
420 420 if self._dirty:
421 421 records = self._makerecords()
422 422 self._writerecords(records)
423 423 self._dirty = False
424 424
    def _makerecords(self):
        """Serialize the in-memory merge state into (recordtype, data) pairs.

        Record order matters: RECORD_LOCAL must come first because the v1
        writer relies on it being the initial record.
        """
        records = []
        records.append((RECORD_LOCAL, hex(self._local)))
        records.append((RECORD_OTHER, hex(self._other)))
        if self.mergedriver:
            records.append(
                (
                    RECORD_MERGE_DRIVER_STATE,
                    b'\0'.join([self.mergedriver, self._mdstate]),
                )
            )
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in pycompat.iteritems(self._state):
            if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
                # Driver-resolved merge. These are stored in 'D' records.
                records.append(
                    (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
                )
            elif v[0] in (
                MERGE_RECORD_UNRESOLVED_PATH,
                MERGE_RECORD_RESOLVED_PATH,
            ):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append(
                    (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
                )
            elif v[1] == nullhex or v[6] == nullhex:
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append(
                    (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
                )
            else:
                # Normal files. These are stored in 'F' records.
                records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
        for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
            # Extras are flattened as NUL-separated key/value pairs after the
            # filename; sorted for deterministic output.
            rawextras = b'\0'.join(
                b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
            )
            records.append(
                (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
            )
        if self._labels is not None:
            labels = b'\0'.join(self._labels)
            records.append((RECORD_LABELS, labels))
        return records
478 478
479 479 def _writerecords(self, records):
480 480 """Write current state on disk (both v1 and v2)"""
481 481 self._writerecordsv1(records)
482 482 self._writerecordsv2(records)
483 483
484 484 def _writerecordsv1(self, records):
485 485 """Write current state on disk in a version 1 file"""
486 486 f = self._repo.vfs(self.statepathv1, b'wb')
487 487 irecords = iter(records)
488 488 lrecords = next(irecords)
489 489 assert lrecords[0] == RECORD_LOCAL
490 490 f.write(hex(self._local) + b'\n')
491 491 for rtype, data in irecords:
492 492 if rtype == RECORD_MERGED:
493 493 f.write(b'%s\n' % _droponode(data))
494 494 f.close()
495 495
496 496 def _writerecordsv2(self, records):
497 497 """Write current state on disk in a version 2 file
498 498
499 499 See the docstring for _readrecordsv2 for why we use 't'."""
500 500 # these are the records that all version 2 clients can read
501 501 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
502 502 f = self._repo.vfs(self.statepathv2, b'wb')
503 503 for key, data in records:
504 504 assert len(key) == 1
505 505 if key not in allowlist:
506 506 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
507 507 format = b'>sI%is' % len(data)
508 508 f.write(_pack(format, key, len(data), data))
509 509 f.close()
510 510
511 511 @staticmethod
512 512 def getlocalkey(path):
513 513 """hash the path of a local file context for storage in the .hg/merge
514 514 directory."""
515 515
516 516 return hex(hashutil.sha1(path).digest())
517 517
    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            # No local content to preserve (change/delete conflict).
            localkey = nullhex
        else:
            localkey = mergestate.getlocalkey(fcl.path())
            self._repo.vfs.write(b'merge/' + localkey, fcl.data())
        # Field order is the on-disk record layout and must match what
        # _resolve() unpacks: state, localkey, lfile, afile, anode, ofile,
        # onode, flags.
        self._state[fd] = [
            MERGE_RECORD_UNRESOLVED,
            localkey,
            fcl.path(),
            fca.path(),
            hex(fca.filenode()),
            fco.path(),
            hex(fco.filenode()),
            fcl.flags(),
        ]
        self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
        self._dirty = True
544 544
545 545 def addpath(self, path, frename, forigin):
546 546 """add a new conflicting path to the merge state
547 547 path: the path that conflicts
548 548 frename: the filename the conflicting file was renamed to
549 549 forigin: origin of the file ('l' or 'r' for local/remote)
550 550 """
551 551 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
552 552 self._dirty = True
553 553
    def __contains__(self, dfile):
        # True when `dfile` has a merge record in this state.
        return dfile in self._state
556 556
    def __getitem__(self, dfile):
        # Return only the merge-state marker (first field of the record).
        return self._state[dfile][0]
559 559
    def __iter__(self):
        # Iterate recorded paths in sorted (deterministic) order.
        return iter(sorted(self._state))
562 562
    def files(self):
        # All paths with merge records (a dict keys view, not a list).
        return self._state.keys()
565 565
566 566 def mark(self, dfile, state):
567 567 self._state[dfile][0] = state
568 568 self._dirty = True
569 569
    def mdstate(self):
        # Current merge-driver state marker.
        return self._mdstate
572 572
573 573 def unresolved(self):
574 574 """Obtain the paths of unresolved files."""
575 575
576 576 for f, entry in pycompat.iteritems(self._state):
577 577 if entry[0] in (
578 578 MERGE_RECORD_UNRESOLVED,
579 579 MERGE_RECORD_UNRESOLVED_PATH,
580 580 ):
581 581 yield f
582 582
583 583 def driverresolved(self):
584 584 """Obtain the paths of driver-resolved files."""
585 585
586 586 for f, entry in self._state.items():
587 587 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
588 588 yield f
589 589
    def extras(self, filename):
        # Per-file extra metadata mapping; created lazily on first access.
        return self._stateextras.setdefault(filename, {})
592 592
    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`

        Returns a (complete, exit code) pair: `complete` is whether the
        (pre)merge step finished; exit code 0 means the file ended up
        resolved.
        """
        if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
            # Already resolved -- nothing to redo.
            return True, 0
        stateentry = self._state[dfile]
        # Field layout must match what add() stored.
        state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get(b'ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(localkey, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
        # "premerge" x flags: merge the executable bit separately from the
        # file contents, unless a symlink is involved.
        flo = fco.flags()
        fla = fca.flags()
        if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                # No common ancestor for the flags: keep the local ones.
                if preresolve:
                    self._repo.ui.warn(
                        _(
                            b'warning: cannot merge flags for %s '
                            b'without common ancestor - keeping local flags\n'
                        )
                        % afile
                    )
            elif flags == fla:
                # Only the remote side changed the flags; take its value.
                flags = flo
        if preresolve:
            # restore local file contents from .hg/merge before premerging
            if localkey != nullhex:
                f = self._repo.vfs(b'merge/' + localkey)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                # locally absent (change/delete): make sure it's gone
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(
                self._repo,
                wctx,
                self._local,
                lfile,
                fcd,
                fco,
                fca,
                labels=self._labels,
            )
        else:
            complete, r, deleted = filemerge.filemerge(
                self._repo,
                wctx,
                self._local,
                lfile,
                fcd,
                fco,
                fca,
                labels=self._labels,
            )
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            # exit code 0: merge succeeded
            self.mark(dfile, MERGE_RECORD_RESOLVED)

        if complete:
            # Record what dirstate action (if any) this resolution implies.
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = ACTION_FORGET
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = ACTION_REMOVE
            else:
                if fcd.isabsent():  # dc: remote picked
                    action = ACTION_GET
                elif fco.isabsent():  # cd: local picked
                    if dfile in self.localctx:
                        action = ACTION_ADD_MODIFIED
                    else:
                        action = ACTION_ADD
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r
684 684
685 685 def _filectxorabsent(self, hexnode, ctx, f):
686 686 if hexnode == nullhex:
687 687 return filemerge.absentfilectx(ctx, f)
688 688 else:
689 689 return ctx[f]
690 690
    def preresolve(self, dfile, wctx):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        # Thin wrapper over _resolve with preresolve=True.
        return self._resolve(True, dfile, wctx)
696 696
    def resolve(self, dfile, wctx):
        """run merge process (assuming premerge was run) for dfile

        Returns the exit code of the merge."""
        # Thin wrapper over _resolve with preresolve=False; drops `complete`.
        return self._resolve(False, dfile, wctx)[1]
702 702
703 703 def counts(self):
704 704 """return counts for updated, merged and removed files in this
705 705 session"""
706 706 updated, merged, removed = 0, 0, 0
707 707 for r, action in pycompat.itervalues(self._results):
708 708 if r is None:
709 709 updated += 1
710 710 elif r == 0:
711 711 if action == ACTION_REMOVE:
712 712 removed += 1
713 713 else:
714 714 merged += 1
715 715 return updated, merged, removed
716 716
717 717 def unresolvedcount(self):
718 718 """get unresolved count for this merge (persistent)"""
719 719 return len(list(self.unresolved()))
720 720
721 721 def actions(self):
722 722 """return lists of actions to perform on the dirstate"""
723 723 actions = {
724 724 ACTION_REMOVE: [],
725 725 ACTION_FORGET: [],
726 726 ACTION_ADD: [],
727 727 ACTION_ADD_MODIFIED: [],
728 728 ACTION_GET: [],
729 729 }
730 730 for f, (r, action) in pycompat.iteritems(self._results):
731 731 if action is not None:
732 732 actions[action].append((f, None, b"merge result"))
733 733 return actions
734 734
    def recordactions(self):
        """record remove/add/get actions in the dirstate"""
        # A non-null second parent means an uncommitted branch merge.
        branchmerge = self._repo.dirstate.p2() != nullid
        recordupdates(self._repo, self.actions(), branchmerge, None)
739 739
740 740 def queueremove(self, f):
741 741 """queues a file to be removed from the dirstate
742 742
743 743 Meant for use by custom merge drivers."""
744 744 self._results[f] = 0, ACTION_REMOVE
745 745
746 746 def queueadd(self, f):
747 747 """queues a file to be added to the dirstate
748 748
749 749 Meant for use by custom merge drivers."""
750 750 self._results[f] = 0, ACTION_ADD
751 751
752 752 def queueget(self, f):
753 753 """queues a file to be marked modified in the dirstate
754 754
755 755 Meant for use by custom merge drivers."""
756 756 self._results[f] = 0, ACTION_GET
757 757
758 758
def _getcheckunknownconfig(repo, section, name):
    """Read `section.name` from the config, requiring a known value.

    Returns one of b'abort', b'ignore', b'warn'; raises ConfigError for
    anything else.
    """
    config = repo.ui.config(section, name)
    valid = [b'abort', b'ignore', b'warn']
    if config in valid:
        return config
    validstr = b', '.join([b"'" + v + b"'" for v in valid])
    raise error.ConfigError(
        _(b"%s.%s not valid ('%s' is none of %s)")
        % (section, name, config, validstr)
    )
769 769
770 770
def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    """Return whether working-dir file `f` is an untracked file whose
    content differs from `f2` in mctx (defaulting to the same path)."""
    if wctx.isinmemory():
        # Nothing to do in IMM because nothing in the "working copy" can be an
        # unknown file.
        #
        # Note that we should bail out here, not in ``_checkunknownfiles()``,
        # because that function does other useful work.
        return False

    if f2 is None:
        f2 = f
    if not repo.wvfs.audit.check(f):
        return False
    if not repo.wvfs.isfileorlink(f):
        return False
    if repo.dirstate.normalize(f) in repo.dirstate:
        # The file is tracked, hence not "unknown".
        return False
    return mctx[f2].cmp(wctx[f])
788 788
789 789
class _unknowndirschecker(object):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file. If any path prefix of the file exists as a file or link,
    then it conflicts. If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.
    """

    def __init__(self):
        # A set of paths known to be good. This prevents repeated checking of
        # dirs. It will be updated with any new dirs that are checked and found
        # to be safe.
        self._unknowndircache = set()

        # A set of paths that are known to be absent. This prevents repeated
        # checking of subdirectories that are known not to exist. It will be
        # updated with any new dirs that are checked and found to be absent.
        self._missingdircache = set()

    def __call__(self, repo, wctx, f):
        # Returns the shortest conflicting path prefix, or None (a bare
        # `return` or falling off the end) when there is no conflict.
        if wctx.isinmemory():
            # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
            return False

        # Check for path prefixes that exist as unknown files.
        for p in reversed(list(pathutil.finddirs(f))):
            if p in self._missingdircache:
                return
            if p in self._unknowndircache:
                continue
            if repo.wvfs.audit.check(p):
                if (
                    repo.wvfs.isfileorlink(p)
                    and repo.dirstate.normalize(p) not in repo.dirstate
                ):
                    # An untracked file/link sits where a directory is needed.
                    return p
                if not repo.wvfs.lexists(p):
                    self._missingdircache.add(p)
                    return
                self._unknowndircache.add(p)

        # Check if the file conflicts with a directory containing unknown files.
        if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
            # Does the directory contain any files that are not in the dirstate?
            for p, dirs, files in repo.wvfs.walk(f):
                for fn in files:
                    relf = util.pconvert(repo.wvfs.reljoin(p, fn))
                    relf = repo.dirstate.normalize(relf, isknown=True)
                    if relf not in repo.dirstate:
                        return f
        return None
844 844
845 845
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
    pathconfig = repo.ui.configbool(
        b'experimental', b'merge.checkpathconflicts'
    )
    if not force:

        def collectconflicts(conflicts, config):
            if config == b'abort':
                abortconflicts.update(conflicts)
            elif config == b'warn':
                warnconflicts.update(conflicts)

        checkunknowndirs = _unknowndirschecker()
        for f, (m, args, msg) in pycompat.iteritems(actions):
            if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
                if _checkunknownfile(repo, wctx, mctx, f):
                    fileconflicts.add(f)
                elif pathconfig and f not in wctx:
                    path = checkunknowndirs(repo, wctx, f)
                    if path is not None:
                        pathconflicts.add(path)
            elif m == ACTION_LOCAL_DIR_RENAME_GET:
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in pycompat.iteritems(actions):
            if m == ACTION_CREATED_MERGE:
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = (ACTION_GET, (fl2, False), b'remote created')
                elif mergeforce or config == b'abort':
                    actions[f] = (
                        ACTION_MERGE,
                        (f, f, None, False, anc),
                        b'remote differs from untracked local',
                    )
                # NOTE(review): this branch is unreachable -- the preceding
                # `elif mergeforce or config == b'abort'` already matches
                # config == b'abort'.  Confirm whether abortconflicts was
                # meant to be populated in the mergeforce case instead.
                elif config == b'abort':
                    abortconflicts.add(f)
                else:
                    if config == b'warn':
                        warnconflicts.add(f)
                    actions[f] = (ACTION_GET, (fl2, True), b'remote created')

    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_(b"%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_(b"%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_(b"%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(
            _(
                b"untracked files in working directory "
                b"differ from files in requested revision"
            )
        )

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)

    for f, (m, args, msg) in pycompat.iteritems(actions):
        if m == ACTION_CREATED:
            # Back up the file if it (or a path prefix) was found conflicting.
            backup = (
                f in fileconflicts
                or f in pathconflicts
                or any(p in pathconflicts for p in pathutil.finddirs(f))
            )
            (flags,) = args
            actions[f] = (ACTION_GET, (flags, backup), msg)
956 956
957 957
def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """
    # Deleted files become removals during a branch merge, forgets otherwise.
    m = ACTION_REMOVE if branchmerge else ACTION_FORGET
    actions = {
        f: (m, None, b"forget deleted")
        for f in wctx.deleted()
        if f not in mctx
    }

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions[f] = (ACTION_FORGET, None, b"forget removed")

    return actions
987 987
988 988
def _checkcollision(repo, wmf, actions):
    """
    Check for case-folding collisions.

    wmf is the working-directory manifest; actions (by action type) are
    applied provisionally to model the post-merge manifest before checking.
    Raises error.Abort on any collision.
    """
    # If the repo is narrowed, filter out files outside the narrowspec.
    narrowmatch = repo.narrowmatch()
    if not narrowmatch.always():
        pmmf = set(wmf.walk(narrowmatch))
        if actions:
            narrowactions = {}
            for m, actionsfortype in pycompat.iteritems(actions):
                narrowactions[m] = []
                for (f, args, msg) in actionsfortype:
                    if narrowmatch(f):
                        narrowactions[m].append((f, args, msg))
            actions = narrowactions
    else:
        # build provisional merged manifest up
        pmmf = set(wmf)

    if actions:
        # KEEP and EXEC are no-op
        for m in (
            ACTION_ADD,
            ACTION_ADD_MODIFIED,
            ACTION_FORGET,
            ACTION_GET,
            ACTION_CHANGED_DELETED,
            ACTION_DELETED_CHANGED,
        ):
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions[ACTION_REMOVE]:
            pmmf.discard(f)
        for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
            pmmf.add(f)
        for f, args, msg in actions[ACTION_MERGE]:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(
                _(b"case-folding collision between %s and %s")
                % (f, foldmap[fold])
            )
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = b''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(
                _(b"case-folding collision between %s and directory of %s")
                % (lastfull, f)
            )
        foldprefix = fold + b'/'
        unfoldprefix = f + b'/'
        lastfull = f
1058 1058
1059 1059
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    # No-op stub: report success so the merge proceeds.
    return True
1065 1065
1066 1066
def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    # No-op stub: report success so the merge proceeds.
    return True
1072 1072
1073 1073
def _filesindirs(repo, manifest, dirs):
    """
    Generator that yields pairs of all the files in the manifest that are found
    inside the directories listed in dirs, and which directory they are found
    in.
    """
    for path in manifest:
        for parent in pathutil.finddirs(path):
            if parent not in dirs:
                continue
            yield path, parent
            # Each file is reported once, for its shortest matching parent.
            break
1085 1085
1086 1086
def checkpathconflicts(repo, wctx, mctx, actions):
    """
    Check if any actions introduce path conflicts in the repository, updating
    actions to record or handle the path conflict accordingly.
    """
    mf = wctx.manifest()

    # The set of local files that conflict with a remote directory.
    localconflicts = set()

    # The set of directories that conflict with a remote file, and so may cause
    # conflicts if they still contain any files after the merge.
    remoteconflicts = set()

    # The set of directories that appear as both a file and a directory in the
    # remote manifest. These indicate an invalid remote manifest, which
    # can't be updated to cleanly.
    invalidconflicts = set()

    # The set of directories that contain files that are being created.
    createdfiledirs = set()

    # The set of files deleted by all the actions.
    deletedfiles = set()

    for f, (m, args, msg) in actions.items():
        if m in (
            ACTION_CREATED,
            ACTION_DELETED_CHANGED,
            ACTION_MERGE,
            ACTION_CREATED_MERGE,
        ):
            # This action may create a new local file.
            createdfiledirs.update(pathutil.finddirs(f))
            if mf.hasdir(f):
                # The file aliases a local directory. This might be ok if all
                # the files in the local directory are being deleted. This
                # will be checked once we know what all the deleted files are.
                remoteconflicts.add(f)
        # Track the names of all deleted files.
        if m == ACTION_REMOVE:
            deletedfiles.add(f)
        if m == ACTION_MERGE:
            f1, f2, fa, move, anc = args
            if move:
                deletedfiles.add(f1)
        if m == ACTION_DIR_RENAME_MOVE_LOCAL:
            f2, flags = args
            deletedfiles.add(f2)

    # Check all directories that contain created files for path conflicts.
    for p in createdfiledirs:
        if p in mf:
            if p in mctx:
                # A file is in a directory which aliases both a local
                # and a remote file. This is an internal inconsistency
                # within the remote manifest.
                invalidconflicts.add(p)
            else:
                # A file is in a directory which aliases a local file.
                # We will need to rename the local file.
                localconflicts.add(p)
        if p in actions and actions[p][0] in (
            ACTION_CREATED,
            ACTION_DELETED_CHANGED,
            ACTION_MERGE,
            ACTION_CREATED_MERGE,
        ):
            # The file is in a directory which aliases a remote file.
            # This is an internal inconsistency within the remote
            # manifest.
            invalidconflicts.add(p)

    # Rename all local conflicting files that have not been deleted.
    for p in localconflicts:
        if p not in deletedfiles:
            ctxname = bytes(wctx).rstrip(b'+')
            pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
            actions[pnew] = (
                ACTION_PATH_CONFLICT_RESOLVE,
                (p,),
                b'local path conflict',
            )
            actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')

    if remoteconflicts:
        # Check if all files in the conflicting directories have been removed.
        ctxname = bytes(mctx).rstrip(b'+')
        for f, p in _filesindirs(repo, mf, remoteconflicts):
            if f not in deletedfiles:
                m, args, msg = actions[p]
                pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
                if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
                    # Action was merge, just update target.
                    actions[pnew] = (m, args, msg)
                else:
                    # Action was create, change to renamed get action.
                    fl = args[0]
                    actions[pnew] = (
                        ACTION_LOCAL_DIR_RENAME_GET,
                        (p, fl),
                        b'remote path conflict',
                    )
                actions[p] = (
                    ACTION_PATH_CONFLICT,
                    (pnew, ACTION_REMOVE),
                    b'path conflict',
                )
                remoteconflicts.remove(p)
                # NOTE(review): this break stops after the first untracked
                # conflict is handled, leaving other remoteconflicts
                # untouched -- confirm that is intentional.
                break

    if invalidconflicts:
        for p in invalidconflicts:
            repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
        raise error.Abort(_(b"destination manifest contains path conflicts"))
1202 1202
1203 1203
def _filternarrowactions(narrowmatch, branchmerge, actions):
    """
    Filters out actions that can ignored because the repo is narrowed.

    Raise an exception if the merge cannot be completed because the repo is
    narrowed.
    """
    nooptypes = {b'k'}  # TODO: handle with nonconflicttypes
    nonconflicttypes = set(b'a am c cm f g r e'.split())
    # Iterate over a snapshot because entries are deleted as we go.
    for fname, action in list(actions.items()):
        if narrowmatch(fname):
            # Inside the narrowspec: keep the action untouched.
            continue
        if not branchmerge:
            # just updating, ignore changes outside clone
            del actions[fname]
        elif action[0] in nooptypes:
            # merge does not affect file
            del actions[fname]
        elif action[0] in nonconflicttypes:
            raise error.Abort(
                _(
                    b'merge affects file \'%s\' outside narrow, '
                    b'which is not yet supported'
                )
                % fname,
                hint=_(b'merging in the other direction may work'),
            )
        else:
            raise error.Abort(
                _(b'conflict in file \'%s\' is outside narrow clone') % fname
            )
1235 1235
1236 1236
1237 1237 def manifestmerge(
1238 1238 repo,
1239 1239 wctx,
1240 1240 p2,
1241 1241 pa,
1242 1242 branchmerge,
1243 1243 force,
1244 1244 matcher,
1245 1245 acceptremote,
1246 1246 followcopies,
1247 1247 forcefulldiff=False,
1248 1248 ):
1249 1249 """
1250 1250 Merge wctx and p2 with ancestor pa and generate merge action list
1251 1251
1252 1252 branchmerge and force are as passed in to update
1253 1253 matcher = matcher to filter file lists
1254 1254 acceptremote = accept the incoming changes without prompting
1255 1255 """
1256 1256 if matcher is not None and matcher.always():
1257 1257 matcher = None
1258 1258
1259 1259 # manifests fetched in order are going to be faster, so prime the caches
1260 1260 [
1261 1261 x.manifest()
1262 1262 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
1263 1263 ]
1264 1264
1265 1265 branch_copies1 = copies.branch_copies()
1266 1266 branch_copies2 = copies.branch_copies()
1267 1267 diverge = {}
1268 1268 if followcopies:
1269 1269 branch_copies1, branch_copies2, diverge = copies.mergecopies(
1270 1270 repo, wctx, p2, pa
1271 1271 )
1272 1272
1273 1273 boolbm = pycompat.bytestr(bool(branchmerge))
1274 1274 boolf = pycompat.bytestr(bool(force))
1275 1275 boolm = pycompat.bytestr(bool(matcher))
1276 1276 repo.ui.note(_(b"resolving manifests\n"))
1277 1277 repo.ui.debug(
1278 1278 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
1279 1279 )
1280 1280 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1281 1281
1282 1282 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1283 1283 copied1 = set(branch_copies1.copy.values())
1284 1284 copied1.update(branch_copies1.movewithdir.values())
1285 1285 copied2 = set(branch_copies2.copy.values())
1286 1286 copied2.update(branch_copies2.movewithdir.values())
1287 1287
1288 1288 if b'.hgsubstate' in m1 and wctx.rev() is None:
1289 1289 # Check whether sub state is modified, and overwrite the manifest
1290 1290 # to flag the change. If wctx is a committed revision, we shouldn't
1291 1291 # care for the dirty state of the working directory.
1292 1292 if any(wctx.sub(s).dirty() for s in wctx.substate):
1293 1293 m1[b'.hgsubstate'] = modifiednodeid
1294 1294
1295 1295 # Don't use m2-vs-ma optimization if:
1296 1296 # - ma is the same as m1 or m2, which we're just going to diff again later
1297 1297 # - The caller specifically asks for a full diff, which is useful during bid
1298 1298 # merge.
1299 1299 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
1300 1300 # Identify which files are relevant to the merge, so we can limit the
1301 1301 # total m1-vs-m2 diff to just those files. This has significant
1302 1302 # performance benefits in large repositories.
1303 1303 relevantfiles = set(ma.diff(m2).keys())
1304 1304
1305 1305 # For copied and moved files, we need to add the source file too.
1306 1306 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
1307 1307 if copyvalue in relevantfiles:
1308 1308 relevantfiles.add(copykey)
1309 1309 for movedirkey in branch_copies1.movewithdir:
1310 1310 relevantfiles.add(movedirkey)
1311 1311 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1312 1312 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1313 1313
1314 1314 diff = m1.diff(m2, match=matcher)
1315 1315
1316 1316 actions = {}
1317 1317 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
1318 1318 if n1 and n2: # file exists on both local and remote side
1319 1319 if f not in ma:
1320 1320 # TODO: what if they're renamed from different sources?
1321 1321 fa = branch_copies1.copy.get(
1322 1322 f, None
1323 1323 ) or branch_copies2.copy.get(f, None)
1324 1324 if fa is not None:
1325 1325 actions[f] = (
1326 1326 ACTION_MERGE,
1327 1327 (f, f, fa, False, pa.node()),
1328 1328 b'both renamed from %s' % fa,
1329 1329 )
1330 1330 else:
1331 1331 actions[f] = (
1332 1332 ACTION_MERGE,
1333 1333 (f, f, None, False, pa.node()),
1334 1334 b'both created',
1335 1335 )
1336 1336 else:
1337 1337 a = ma[f]
1338 1338 fla = ma.flags(f)
1339 1339 nol = b'l' not in fl1 + fl2 + fla
1340 1340 if n2 == a and fl2 == fla:
1341 1341 actions[f] = (ACTION_KEEP, (), b'remote unchanged')
1342 1342 elif n1 == a and fl1 == fla: # local unchanged - use remote
1343 1343 if n1 == n2: # optimization: keep local content
1344 1344 actions[f] = (
1345 1345 ACTION_EXEC,
1346 1346 (fl2,),
1347 1347 b'update permissions',
1348 1348 )
1349 1349 else:
1350 1350 actions[f] = (
1351 1351 ACTION_GET,
1352 1352 (fl2, False),
1353 1353 b'remote is newer',
1354 1354 )
1355 1355 elif nol and n2 == a: # remote only changed 'x'
1356 1356 actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
1357 1357 elif nol and n1 == a: # local only changed 'x'
1358 1358 actions[f] = (ACTION_GET, (fl1, False), b'remote is newer')
1359 1359 else: # both changed something
1360 1360 actions[f] = (
1361 1361 ACTION_MERGE,
1362 1362 (f, f, f, False, pa.node()),
1363 1363 b'versions differ',
1364 1364 )
1365 1365 elif n1: # file exists only on local side
1366 1366 if f in copied2:
1367 1367 pass # we'll deal with it on m2 side
1368 1368 elif (
1369 1369 f in branch_copies1.movewithdir
1370 1370 ): # directory rename, move local
1371 1371 f2 = branch_copies1.movewithdir[f]
1372 1372 if f2 in m2:
1373 1373 actions[f2] = (
1374 1374 ACTION_MERGE,
1375 1375 (f, f2, None, True, pa.node()),
1376 1376 b'remote directory rename, both created',
1377 1377 )
1378 1378 else:
1379 1379 actions[f2] = (
1380 1380 ACTION_DIR_RENAME_MOVE_LOCAL,
1381 1381 (f, fl1),
1382 1382 b'remote directory rename - move from %s' % f,
1383 1383 )
1384 1384 elif f in branch_copies1.copy:
1385 1385 f2 = branch_copies1.copy[f]
1386 1386 actions[f] = (
1387 1387 ACTION_MERGE,
1388 1388 (f, f2, f2, False, pa.node()),
1389 1389 b'local copied/moved from %s' % f2,
1390 1390 )
1391 1391 elif f in ma: # clean, a different, no remote
1392 1392 if n1 != ma[f]:
1393 1393 if acceptremote:
1394 1394 actions[f] = (ACTION_REMOVE, None, b'remote delete')
1395 1395 else:
1396 1396 actions[f] = (
1397 1397 ACTION_CHANGED_DELETED,
1398 1398 (f, None, f, False, pa.node()),
1399 1399 b'prompt changed/deleted',
1400 1400 )
1401 1401 elif n1 == addednodeid:
1402 1402 # This extra 'a' is added by working copy manifest to mark
1403 1403 # the file as locally added. We should forget it instead of
1404 1404 # deleting it.
1405 1405 actions[f] = (ACTION_FORGET, None, b'remote deleted')
1406 1406 else:
1407 1407 actions[f] = (ACTION_REMOVE, None, b'other deleted')
1408 1408 elif n2: # file exists only on remote side
1409 1409 if f in copied1:
1410 1410 pass # we'll deal with it on m1 side
1411 1411 elif f in branch_copies2.movewithdir:
1412 1412 f2 = branch_copies2.movewithdir[f]
1413 1413 if f2 in m1:
1414 1414 actions[f2] = (
1415 1415 ACTION_MERGE,
1416 1416 (f2, f, None, False, pa.node()),
1417 1417 b'local directory rename, both created',
1418 1418 )
1419 1419 else:
1420 1420 actions[f2] = (
1421 1421 ACTION_LOCAL_DIR_RENAME_GET,
1422 1422 (f, fl2),
1423 1423 b'local directory rename - get from %s' % f,
1424 1424 )
1425 1425 elif f in branch_copies2.copy:
1426 1426 f2 = branch_copies2.copy[f]
1427 1427 if f2 in m2:
1428 1428 actions[f] = (
1429 1429 ACTION_MERGE,
1430 1430 (f2, f, f2, False, pa.node()),
1431 1431 b'remote copied from %s' % f2,
1432 1432 )
1433 1433 else:
1434 1434 actions[f] = (
1435 1435 ACTION_MERGE,
1436 1436 (f2, f, f2, True, pa.node()),
1437 1437 b'remote moved from %s' % f2,
1438 1438 )
1439 1439 elif f not in ma:
1440 1440 # local unknown, remote created: the logic is described by the
1441 1441 # following table:
1442 1442 #
1443 1443 # force branchmerge different | action
1444 1444 # n * * | create
1445 1445 # y n * | create
1446 1446 # y y n | create
1447 1447 # y y y | merge
1448 1448 #
1449 1449 # Checking whether the files are different is expensive, so we
1450 1450 # don't do that when we can avoid it.
1451 1451 if not force:
1452 1452 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1453 1453 elif not branchmerge:
1454 1454 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1455 1455 else:
1456 1456 actions[f] = (
1457 1457 ACTION_CREATED_MERGE,
1458 1458 (fl2, pa.node()),
1459 1459 b'remote created, get or merge',
1460 1460 )
1461 1461 elif n2 != ma[f]:
1462 1462 df = None
1463 1463 for d in branch_copies1.dirmove:
1464 1464 if f.startswith(d):
1465 1465 # new file added in a directory that was moved
1466 1466 df = branch_copies1.dirmove[d] + f[len(d) :]
1467 1467 break
1468 1468 if df is not None and df in m1:
1469 1469 actions[df] = (
1470 1470 ACTION_MERGE,
1471 1471 (df, f, f, False, pa.node()),
1472 1472 b'local directory rename - respect move '
1473 1473 b'from %s' % f,
1474 1474 )
1475 1475 elif acceptremote:
1476 1476 actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
1477 1477 else:
1478 1478 actions[f] = (
1479 1479 ACTION_DELETED_CHANGED,
1480 1480 (None, f, f, False, pa.node()),
1481 1481 b'prompt deleted/changed',
1482 1482 )
1483 1483
1484 1484 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1485 1485 # If we are merging, look for path conflicts.
1486 1486 checkpathconflicts(repo, wctx, p2, actions)
1487 1487
1488 1488 narrowmatch = repo.narrowmatch()
1489 1489 if not narrowmatch.always():
1490 1490 # Updates "actions" in place
1491 1491 _filternarrowactions(narrowmatch, branchmerge, actions)
1492 1492
1493 1493 renamedelete = branch_copies1.renamedelete
1494 1494 renamedelete.update(branch_copies2.renamedelete)
1495 1495
1496 1496 return actions, diverge, renamedelete
1497 1497
1498 1498
def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Resolves false conflicts where the nodeid changed but the content
    remained the same."""
    # Iterate over a snapshot of the items because we mutate and delete
    # entries of ``actions`` while walking it.
    for f, (m, args, msg) in list(actions.items()):
        if f not in ancestor:
            continue
        if m == ACTION_CHANGED_DELETED and not wctx[f].cmp(ancestor[f]):
            # local did change but ended up with same content
            actions[f] = (ACTION_REMOVE, None, b'prompt same')
        elif m == ACTION_DELETED_CHANGED and not mctx[f].cmp(ancestor[f]):
            # remote did change but ended up with same content
            del actions[f]  # don't get = keep local deleted
1519 1519
1520 1520
def calculateupdates(
    repo,
    wctx,
    mctx,
    ancestors,
    branchmerge,
    force,
    acceptremote,
    followcopies,
    matcher=None,
    mergeforce=False,
):
    """Calculate the actions needed to merge mctx into wctx using ancestors

    Returns (actions, diverge, renamedelete) where ``actions`` maps
    filenames to the merge action tuples consumed by applyupdates().

    With a single ancestor this is a plain manifestmerge() call. With
    several ancestors (merge.preferancestor=*) a "bid merge" is run:
    manifestmerge() is invoked once per ancestor and the per-file results
    ("bids") are reconciled in the auction below.
    """
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1:  # default
        actions, diverge, renamedelete = manifestmerge(
            repo,
            wctx,
            mctx,
            ancestors[0],
            branchmerge,
            force,
            matcher,
            acceptremote,
            followcopies,
        )
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else:  # only when merge.preferancestor=* - the default
        repo.ui.note(
            _(b"note: merging %s and %s using bids from ancestors %s\n")
            % (
                wctx,
                mctx,
                _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
            )
        )

        # Call for bids
        fbids = (
            {}
        )  # mapping filename to bids (action method to list of actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
            # Ask for a full diff so every ancestor bids on the same set of
            # files; the m2-vs-ma optimization would otherwise make the bid
            # sets incomparable.
            actions, diverge1, renamedelete1 = manifestmerge(
                repo,
                wctx,
                mctx,
                ancestor,
                branchmerge,
                force,
                matcher,
                acceptremote,
                followcopies,
                forcefulldiff=True,
            )
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            # BUG FIX: this comparison was inverted (it kept the *longest*
            # renamedelete set), contradicting both the comment above and
            # the handling of ``diverge`` just before it.
            if renamedelete is None or len(renamedelete1) < len(renamedelete):
                renamedelete = renamedelete1

            for f, a in sorted(pycompat.iteritems(actions)):
                m, args, msg = a
                repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_(b'\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list of actions
            # Consensus?
            if len(bids) == 1:  # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]):  # len(bids) is > 1
                    repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if ACTION_KEEP in bids:
                repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
                actions[f] = bids[ACTION_KEEP][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if ACTION_GET in bids:
                ga0 = bids[ACTION_GET][0]
                if all(a == ga0 for a in bids[ACTION_GET][1:]):
                    repo.ui.note(_(b" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(b' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(
                _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
            )
            actions[f] = l[0]
            continue
        repo.ui.note(_(b'end of auction\n\n'))

    if wctx.rev() is None:
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    prunedactions = sparse.filterupdatesactions(
        repo, wctx, mctx, branchmerge, actions
    )
    # NOTE(review): this mutates ``actions`` after ``prunedactions`` has been
    # computed from it; if sparse filtering ever returns a new dict rather
    # than the same one, the trivial resolutions below would not be reflected
    # in the return value -- confirm whether _resolvetrivial should run
    # before the filtering.
    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    return prunedactions, diverge, renamedelete
1650 1650
1651 1651
def _getcwd():
    """Return the current working directory, or None if it no longer exists."""
    try:
        return encoding.getcwd()
    except OSError as err:
        # ENOENT means the cwd was deleted out from under us; anything
        # else is propagated to the caller.
        if err.errno != errno.ENOENT:
            raise
        return None
1659 1659
1660 1660
def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    ui = repo.ui
    verbose = ui.verbose
    startcwd = _getcwd()
    pending = 0
    for f, args, msg in actions:
        ui.debug(b" %s: %s -> r\n" % (f, msg))
        if verbose:
            ui.note(_(b"removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            ui.warn(
                _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
            )
        # emit a progress tuple for every batch of 100 files processed
        if pending == 100:
            yield pending, f
            pending = 0
        pending += 1
    if pending > 0:
        yield pending, f

    if startcwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        ui.warn(
            _(
                b"current directory was removed\n"
                b"(consider changing to repo root: %s)\n"
            )
            % repo.root
        )
1697 1697
1698 1698
def batchget(repo, mctx, wctx, wantfiledata, actions):
    """apply gets to the working directory

    mctx is the context to get from

    Yields arbitrarily many (False, tuple) for progress updates, followed by
    exactly one (True, filedata). When wantfiledata is false, filedata is an
    empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
    mtime) of the file f written for each action.
    """
    filedata = {}
    ui = repo.ui
    verbose = ui.verbose
    fctx = mctx.filectx
    pending = 0
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            ui.debug(b" %s: %s -> g\n" % (f, msg))
            if verbose:
                ui.note(_(b"getting %s\n") % f)

            if backup:
                # If a file or directory exists with the same name, back that
                # up. Otherwise, look to see if there is a file that conflicts
                # with a directory this file is in, and if so, back that up.
                conflicting = f
                if not repo.wvfs.lexists(f):
                    for parent in pathutil.finddirs(f):
                        if repo.wvfs.isfileorlink(parent):
                            conflicting = parent
                            break
                if repo.wvfs.lexists(conflicting):
                    orig = scmutil.backuppath(ui, repo, conflicting)
                    util.rename(repo.wjoin(conflicting), orig)
            wfctx = wctx[f]
            wfctx.clearunknown()
            atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
            size = wfctx.write(
                fctx(f).data(),
                flags,
                backgroundclose=True,
                atomictemp=atomictemp,
            )
            if wantfiledata:
                st = wfctx.lstat()
                # triple consumed by dirstate.normal
                filedata[f] = (st.st_mode, size, st[stat.ST_MTIME])
            # emit a progress tuple for every batch of 100 files processed
            if pending == 100:
                yield False, (pending, f)
                pending = 0
            pending += 1
    if pending > 0:
        yield False, (pending, f)
    yield True, filedata
1754 1754
1755 1755
def _prefetchfiles(repo, ctx, actions):
    """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
    of merge actions. ``ctx`` is the context being merged in."""

    # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
    # don't touch the context to be merged in. 'cd' is skipped, because
    # changed/deleted never resolves to something from the remote side.
    relevant = (
        ACTION_GET,
        ACTION_DELETED_CHANGED,
        ACTION_LOCAL_DIR_RENAME_GET,
        ACTION_MERGE,
    )
    files = [f for a in relevant for f, args, msg in actions[a]]
    scmutil.prefetchfiles(
        repo, [ctx.rev()], scmutil.matchfiles(repo, files)
    )
1779 1779
1780 1780
@attr.s(frozen=True)
class updateresult(object):
    # counters describing the outcome of an applyupdates() run
    updatedcount = attr.ib()
    mergedcount = attr.ib()
    removedcount = attr.ib()
    unresolvedcount = attr.ib()

    def isempty(self):
        """True when no files were touched and nothing is unresolved."""
        return not any(
            (
                self.updatedcount,
                self.mergedcount,
                self.removedcount,
                self.unresolvedcount,
            )
        )
1795 1795
1796 1796
def emptyactions():
    """create an actions dict, to be populated and passed to applyupdates()"""
    actiontypes = (
        ACTION_ADD,
        ACTION_ADD_MODIFIED,
        ACTION_FORGET,
        ACTION_GET,
        ACTION_CHANGED_DELETED,
        ACTION_DELETED_CHANGED,
        ACTION_REMOVE,
        ACTION_DIR_RENAME_MOVE_LOCAL,
        ACTION_LOCAL_DIR_RENAME_GET,
        ACTION_MERGE,
        ACTION_EXEC,
        ACTION_KEEP,
        ACTION_PATH_CONFLICT,
        ACTION_PATH_CONFLICT_RESOLVE,
    )
    # one independent empty list per action type
    return {m: [] for m in actiontypes}
1818 1818
1819 1819
def applyupdates(
    repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    overwrite is forwarded to subrepo merges and, when true, disables the
    merge driver (see ``usemergedriver`` below)
    wantfiledata, if true, requests (mode, size, mtime) data for files
    written by 'get' actions (see batchget)
    labels are the conflict-marker labels recorded in the merge state

    Return a tuple of (counts, filedata), where counts is a tuple
    (updated, merged, removed, unresolved) that describes how many
    files were affected by the update, and filedata is as described in
    batchget.
    """

    # prefetch the file contents relevant to these actions up front
    # (see _prefetchfiles)
    _prefetchfiles(repo, mctx, actions)

    updated, merged, removed = 0, 0, 0
    # start a fresh merge state for wctx's first parent vs mctx
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    # sort every action list in place so processing order is stable
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
    mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
    mergeactions.extend(actions[ACTION_MERGE])
    # register every conflicting file with the merge state so that its
    # pre-merge content is preserved before we start touching the tree
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == b'.hgsubstate':  # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug(b"removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    # 'keep' actions are no-ops and excluded from the progress total
    numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
    progress = repo.ui.makeprogress(
        _(b'updating'), unit=_(b'files'), total=numupdates
    )

    # a .hgsubstate removal means subrepo state changed; merge it first
    if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # record path conflicts
    for f, args, msg in actions[ACTION_PATH_CONFLICT]:
        f1, fo = args
        s = repo.ui.status
        s(
            _(
                b"%s: path conflict - a file or link has the same name as a "
                b"directory\n"
            )
            % f
        )
        if fo == b'l':
            s(_(b"the local file has been renamed to %s\n") % f1)
        else:
            s(_(b"the remote file has been renamed to %s\n") % f1)
        s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
        ms.addpath(f, f1, fo)
        progress.increment(item=f)

    # When merging in-memory, we can't support worker processes, so set the
    # per-item cost at 0 in that case.
    cost = 0 if wctx.isinmemory() else 0.001

    # remove in parallel (must come before resolving path conflicts and getting)
    prog = worker.worker(
        repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
    )
    for i, item in prog:
        progress.increment(step=i, item=item)
    removed = len(actions[ACTION_REMOVE])

    # resolve path conflicts (must come before getting)
    for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
        repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
        (f0,) = args
        if wctx[f0].lexists():
            repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
            wctx[f].audit()
            wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
            wctx[f0].remove()
        progress.increment(item=f)

    # get in parallel.
    threadsafe = repo.ui.configbool(
        b'experimental', b'worker.wdir-get-thread-safe'
    )
    prog = worker.worker(
        repo.ui,
        cost,
        batchget,
        (repo, mctx, wctx, wantfiledata),
        actions[ACTION_GET],
        threadsafe=threadsafe,
        hasretval=True,
    )
    getfiledata = {}
    # batchget yields (False, progress-tuple) entries and exactly one
    # final (True, filedata) entry
    for final, res in prog:
        if final:
            getfiledata = res
        else:
            i, item = res
            progress.increment(step=i, item=item)
    updated = len(actions[ACTION_GET])

    # a fetched .hgsubstate means subrepo state changed; merge it now
    if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions[ACTION_FORGET]:
        repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
        progress.increment(item=f)

    # re-add (manifest only, just log it)
    for f, args, msg in actions[ACTION_ADD]:
        repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
        progress.increment(item=f)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions[ACTION_ADD_MODIFIED]:
        repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
        progress.increment(item=f)

    # keep (noop, just log it)
    for f, args, msg in actions[ACTION_KEEP]:
        repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
        repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
        repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions[ACTION_EXEC]:
        repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
        progress.increment(item=f)
        (flags,) = args
        wctx[f].audit()
        wctx[f].setflags(b'l' in flags, b'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        if wctx.isinmemory():
            raise error.InMemoryMergeConflictsError(
                b"in-memory merge does not support mergedriver"
            )
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updateresult(
                updated, merged, removed, max(len(unresolvedf), 1)
            )
        # only merge the files the driver left unresolved
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    try:
        # premerge
        tocomplete = []
        for f, args, msg in mergeactions:
            repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
            progress.increment(item=f)
            if f == b'.hgsubstate':  # subrepo states need updating
                subrepoutil.submerge(
                    repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
                )
                continue
            wctx[f].audit()
            # premerge may fully resolve the file; otherwise finish it in
            # the second pass below (and count it once more in the total)
            complete, r = ms.preresolve(f, wctx)
            if not complete:
                numupdates += 1
                tocomplete.append((f, args, msg))

        # merge
        for f, args, msg in tocomplete:
            repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
            progress.increment(item=f, total=numupdates)
            ms.resolve(f, wctx)

    finally:
        # persist the merge state even if a resolve raised
        ms.commit()

    unresolved = ms.unresolvedcount()

    if (
        usemergedriver
        and not unresolved
        and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
    ):
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

    ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    # fold any extra actions the merge/driver produced back into ``actions``
    # so recordupdates() sees them
    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions[ACTION_MERGE])
        for k, acts in pycompat.iteritems(extraactions):
            actions[k].extend(acts)
            if k == ACTION_GET and wantfiledata:
                # no filedata until mergestate is updated to provide it
                for a in acts:
                    getfiledata[a[0]] = None
            # Remove these files from actions[ACTION_MERGE] as well. This is
            # important because in recordupdates, files in actions[ACTION_MERGE]
            # are processed after files in other actions, and the merge driver
            # might add files to those actions via extraactions above. This can
            # lead to a file being recorded twice, with poor results. This is
            # especially problematic for actions[ACTION_REMOVE] (currently only
            # possible with the merge driver in the initial merge process;
            # interrupted merges don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions[ACTION_MERGE] = [
            a for a in actions[ACTION_MERGE] if a[0] in mfiles
        ]

    progress.complete()
    assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
    return updateresult(updated, merged, removed, unresolved), getfiledata
2105 2105
2106 2106
def recordupdates(repo, actions, branchmerge, getfiledata):
    """record merge actions to the dirstate

    ``actions`` maps each action constant to a list of ``(filename, args,
    message)`` tuples, as produced by applyupdates(). ``branchmerge``
    selects between merge-style dirstate states (two parents) and plain
    update-style states. ``getfiledata`` optionally maps filename ->
    (mode, size, mtime) for files written by 'get' actions, as returned
    by applyupdates().
    """
    # remove (must come first)
    for f, args, msg in actions.get(ACTION_REMOVE, []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get(ACTION_FORGET, []):
        repo.dirstate.drop(f)

    # resolve path conflicts
    for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
        (f0,) = args
        # follow an existing copy record so the chain stays rooted at the
        # original source
        origf0 = repo.dirstate.copied(f0) or f0
        repo.dirstate.add(f)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.remove(f0)
        else:
            repo.dirstate.drop(f0)

    # re-add
    for f, args, msg in actions.get(ACTION_ADD, []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get(ACTION_EXEC, []):
        repo.dirstate.normallookup(f)

    # keep (nothing to record)
    for f, args, msg in actions.get(ACTION_KEEP, []):
        pass

    # get
    for f, args, msg in actions.get(ACTION_GET, []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            # reuse the (mode, size, mtime) captured at write time, if any
            parentfiledata = getfiledata[f] if getfiledata else None
            repo.dirstate.normal(f, parentfiledata=parentfiledata)

    # merge
    for f, args, msg in actions.get(ACTION_MERGE, []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2:  # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f:  # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
2202 2202
2203 2203
# Allowed values for the ``updatecheck`` argument of update() below (the
# experimental.updatecheck config option; see update()'s docstring).
UPDATECHECK_ABORT = b'abort'  # handled at higher layers
UPDATECHECK_NONE = b'none'
UPDATECHECK_LINEAR = b'linear'
UPDATECHECK_NO_CONFLICT = b'noconflict'
2208 2208
2209 2209
def update(
    repo,
    node,
    branchmerge,
    force,
    ancestor=None,
    mergeancestor=False,
    labels=None,
    matcher=None,
    mergeforce=False,
    updatedirstate=True,
    updatecheck=None,
    wc=None,
):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by rebase extension
      as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command given the
    -c/--check and -C/--clean or no options, whether the working directory is
    dirty, whether a revision is specified, and the relationship of the parent
    rev to the target rev (linear or not). Match from top first. The -n
    option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *     |    (1)
     y   *   y   *    *     *     *     |    (1)
     y   *   *   y    *     *     *     |    (1)
     *   y   y   *    *     *     *     |    (1)
     *   y   *   y    *     *     *     |    (1)
     *   *   y   y    *     *     *     |    (1)
     *   *   *   *    *     n     n     |     x
     *   *   *   *    n     *     *     |    ok
     n   n   n   n    y     *     y     |  merge
     n   n   n   n    y     y     n     |    (2)
     n   n   n   y    y     *     *     |  merge
     n   n   y   n    y     *     *     |  merge if no conflict
     n   y   n   n    y     *     *     |  discard
     y   n   n   n    y     *     *     |    (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like object. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = UPDATECHECK_LINEAR
        if updatecheck not in (
            UPDATECHECK_NONE,
            UPDATECHECK_LINEAR,
            UPDATECHECK_NO_CONFLICT,
        ):
            raise ValueError(
                r'Invalid updatecheck %r (can accept %r)'
                % (
                    updatecheck,
                    (
                        UPDATECHECK_NONE,
                        UPDATECHECK_LINEAR,
                        UPDATECHECK_NO_CONFLICT,
                    ),
                )
            )
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        p2 = repo[node]
        # pas: the list of candidate merge ancestors (contexts) to use
        if ancestor is not None:
            pas = [repo[ancestor]]
        else:
            if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)

        # overwrite: clobber working copy files outright (update --clean)
        overwrite = force and not branchmerge
        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_(b"outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(
                    _(b"outstanding merge conflicts"),
                    hint=_(b"use 'hg resolve' to resolve"),
                )
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(
                    _(
                        b"merging with a working directory ancestor"
                        b" has no effect"
                    )
                )
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(
                        _(b"nothing to merge"),
                        hint=_(b"use 'hg update' or check 'hg heads'"),
                    )
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(
                    _(b"uncommitted changes"),
                    hint=_(b"use 'hg status' to list changes"),
                )
            if not wc.isinmemory():
                # dirty subrepos also block a branch merge
                for s in sorted(wc.substate):
                    wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2:  # no-op update
                # call the hooks and exit early
                repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
                repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
                return updateresult(0, 0, 0, 0)

            if updatecheck == UPDATECHECK_LINEAR and pas not in (
                [p1],
                [p2],
            ):  # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass  # allow updating to successors
                    else:
                        msg = _(b"uncommitted changes")
                        hint = _(b"commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool(b'merge', b'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo,
            wc,
            p2,
            pas,
            branchmerge,
            force,
            mergeancestor,
            followcopies,
            matcher=matcher,
            mergeforce=mergeforce,
        )

        if updatecheck == UPDATECHECK_NO_CONFLICT:
            # any action other than these read-only-safe ones means a
            # conflict would be created in the working copy
            for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
                if m not in (
                    ACTION_GET,
                    ACTION_KEEP,
                    ACTION_EXEC,
                    ACTION_REMOVE,
                    ACTION_PATH_CONFLICT_RESOLVE,
                ):
                    msg = _(b"conflicting changes")
                    hint = _(b"commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepoutil.submerge yet so we have to keep prompting for it.
        if b'.hgsubstate' in actionbyfile:
            f = b'.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts[b'f'] = f
            if m == ACTION_CHANGED_DELETED:
                if repo.ui.promptchoice(
                    _(
                        b"local%(l)s changed %(f)s which other%(o)s deleted\n"
                        b"use (c)hanged version or (d)elete?"
                        b"$$ &Changed $$ &Delete"
                    )
                    % prompts,
                    0,
                ):
                    actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
                elif f in p1:
                    actionbyfile[f] = (
                        ACTION_ADD_MODIFIED,
                        None,
                        b'prompt keep',
                    )
                else:
                    actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
            elif m == ACTION_DELETED_CHANGED:
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if (
                    repo.ui.promptchoice(
                        _(
                            b"other%(o)s changed %(f)s which local%(l)s deleted\n"
                            b"use (c)hanged version or leave (d)eleted?"
                            b"$$ &Changed $$ &Deleted"
                        )
                        % prompts,
                        0,
                    )
                    == 0
                ):
                    actionbyfile[f] = (
                        ACTION_GET,
                        (flags, False),
                        b'prompt recreating',
                    )
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = emptyactions()
        for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if not branchmerge and (
                force or not wc.dirty(missing=True, branch=False)
            ):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(pycompat.iteritems(diverge)):
            repo.ui.warn(
                _(
                    b"note: possible conflict - %s was renamed "
                    b"multiple times to:\n"
                )
                % f
            )
            for nf in sorted(fl):
                repo.ui.warn(b" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(pycompat.iteritems(renamedelete)):
            repo.ui.warn(
                _(
                    b"note: possible conflict - %s was deleted "
                    b"and renamed to:\n"
                )
                % f
            )
            for nf in sorted(fl):
                repo.ui.warn(b" %s\n" % nf)

        ### apply phase
        if not branchmerge:  # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
        # If we're doing a partial update, we need to skip updating
        # the dirstate.
        always = matcher is None or matcher.always()
        updatedirstate = updatedirstate and always and not wc.isinmemory()
        if updatedirstate:
            repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write(b'updatestate', p2.hex())

        # Advertise fsmonitor when its presence could be useful.
        #
        # We only advertise when performing an update from an empty working
        # directory. This typically only occurs during initial clone.
        #
        # We give users a mechanism to disable the warning in case it is
        # annoying.
        #
        # We only allow on Linux and MacOS because that's where fsmonitor is
        # considered stable.
        fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
        fsmonitorthreshold = repo.ui.configint(
            b'fsmonitor', b'warn_update_file_count'
        )
        try:
            # avoid cycle: extensions -> cmdutil -> merge
            from . import extensions

            extensions.find(b'fsmonitor')
            fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
            # We intentionally don't look at whether fsmonitor has disabled
            # itself because a) fsmonitor may have already printed a warning
            # b) we only care about the config state here.
        except KeyError:
            fsmonitorenabled = False

        if (
            fsmonitorwarning
            and not fsmonitorenabled
            and p1.node() == nullid
            and len(actions[ACTION_GET]) >= fsmonitorthreshold
            and pycompat.sysplatform.startswith((b'linux', b'darwin'))
        ):
            repo.ui.warn(
                _(
                    b'(warning: large working directory being used without '
                    b'fsmonitor enabled; enable fsmonitor to improve performance; '
                    b'see "hg help -e fsmonitor")\n'
                )
            )

        wantfiledata = updatedirstate and not branchmerge
        stats, getfiledata = applyupdates(
            repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
        )

        if updatedirstate:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge, getfiledata)
                # update completed, clear state
                util.unlink(repo.vfs.join(b'updatestate'))

                if not branchmerge:
                    repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (ex: this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if updatedirstate:
        repo.hook(
            b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
        )
    return stats
2591 2591
2592 2592
def merge(ctx, labels=None, force=False, wc=None):
    """Merge the changeset ``ctx`` into the working copy.

    This is a thin convenience wrapper around update() with branch
    merging enabled.

    force = whether the merge was run with 'merge --force' (deprecated)
    """
    repo = ctx.repo()
    rev = ctx.rev()
    return update(
        repo,
        rev,
        branchmerge=True,
        force=force,
        labels=labels,
        mergeforce=force,
        wc=wc,
    )
2608
2609
def clean_update(ctx, wc=None):
    """Update the working copy to ``ctx``, discarding local changes.

    Any modifications in the working copy are thrown away; afterwards
    the working copy matches the given commit (like ``update --clean``).
    """
    repo = ctx.repo()
    rev = ctx.rev()
    return update(repo, rev, branchmerge=False, force=True, wc=wc)
2600 2617
2601 2618
def revert_to(ctx, matcher=None, wc=None):
    """Make the working copy's content match the given commit.

    The working copy keeps its current parent(s): only file contents are
    synchronized with ``ctx`` (the dirstate is deliberately not updated).
    """
    repo = ctx.repo()
    target = ctx.rev()
    return update(
        repo,
        target,
        branchmerge=False,
        force=True,
        updatedirstate=False,
        matcher=matcher,
        wc=wc,
    )
2618 2635
2619 2636
def graft(
    repo,
    ctx,
    base=None,
    labels=None,
    keepparent=False,
    keepconflictparent=False,
    wctx=None,
):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    base - merge base, or ctx.p1() if not specified
    labels - merge labels eg ['local', 'graft']
    keepparent - keep second parent if any
    keepconflictparent - if unresolved, keep parent used for the merge

    Returns the stats object produced by update().
    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    # We also pass mergeancestor=True when base is the same revision as p1. 2)
    # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
    wctx = wctx or repo[None]
    pctx = wctx.p1()
    base = base or ctx.p1()
    mergeancestor = (
        repo.changelog.isancestor(pctx.node(), ctx.node())
        or pctx.rev() == base.rev()
    )

    stats = update(
        repo,
        ctx.node(),
        True,
        True,
        base.node(),
        mergeancestor=mergeancestor,
        labels=labels,
        wc=wctx,
    )

    # pother: node to record as the working copy's second parent
    if keepconflictparent and stats.unresolvedcount:
        pother = ctx.node()
    else:
        pother = nullid
        parents = ctx.parents()
        if keepparent and len(parents) == 2 and base in parents:
            parents.remove(base)
            pother = parents[0].node()
    # Never set both parents equal to each other
    if pother == pctx.node():
        pother = nullid

    if wctx.isinmemory():
        wctx.setparents(pctx.node(), pother)
        # fix up dirstate for copies and renames
        copies.graftcopies(wctx, ctx, base)
    else:
        with repo.dirstate.parentchange():
            repo.setparents(pctx.node(), pother)
            repo.dirstate.write(repo.currenttransaction())
            # fix up dirstate for copies and renames
            copies.graftcopies(wctx, ctx, base)
    return stats
2694 2711
2695 2712
def purge(
    repo,
    matcher,
    unknown=True,
    ignored=False,
    removeemptydirs=True,
    removefiles=True,
    abortonerror=False,
    noop=False,
):
    """Purge the working directory of untracked files.

    ``matcher`` is a matcher configured to scan the working directory -
    potentially a subset.

    ``unknown`` controls whether unknown files should be purged.

    ``ignored`` controls whether ignored files should be purged.

    ``removeemptydirs`` controls whether empty directories should be removed.

    ``removefiles`` controls whether files are removed.

    ``abortonerror`` causes an exception to be raised if an error occurs
    deleting a file or directory.

    ``noop`` controls whether to actually remove files. If not defined, actions
    will be taken.

    Returns an iterable of relative paths in the working directory that were
    or would be removed.
    """

    def _tryremove(removefn, path):
        # Best-effort deletion: turn an OSError into a warning, or an
        # Abort when the caller asked for abortonerror.
        try:
            removefn(path)
        except OSError:
            m = _(b'%s cannot be removed') % path
            if abortonerror:
                raise error.Abort(m)
            else:
                repo.ui.warn(_(b'warning: %s\n') % m)

    # There's no API to copy a matcher, so temporarily hijack its
    # traversedir callback and put the old one back when we're done.
    savedtraversedir = matcher.traversedir

    removed = []
    directories = []

    try:
        if removeemptydirs:
            matcher.traversedir = directories.append

        status = repo.status(match=matcher, ignored=ignored, unknown=unknown)

        if removefiles:
            for f in sorted(status.unknown + status.ignored):
                if not noop:
                    repo.ui.note(_(b'removing file %s\n') % f)
                    _tryremove(repo.wvfs.unlink, f)
                removed.append(f)

        if removeemptydirs:
            # deepest-first so nested empty directories empty their parents
            for d in sorted(directories, reverse=True):
                if matcher(d) and not repo.wvfs.listdir(d):
                    if not noop:
                        repo.ui.note(_(b'removing directory %s\n') % d)
                        _tryremove(repo.wvfs.rmdir, d)
                    removed.append(d)

        return removed

    finally:
        matcher.traversedir = savedtraversedir
General Comments 0
You need to be logged in to leave comments. Login now