##// END OF EJS Templates
path: directly use the push_variant in outgoing internals...
marmoute -
r50596:1470a533 default
parent child Browse files
Show More
@@ -1,1636 +1,1636 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import os
11 11 import posixpath
12 12 import shutil
13 13 import stat
14 14 import weakref
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 sha1nodeconstants,
20 20 short,
21 21 )
22 22 from .pycompat import getattr
23 23
24 24 from . import (
25 25 bookmarks,
26 26 bundlerepo,
27 27 cmdutil,
28 28 destutil,
29 29 discovery,
30 30 error,
31 31 exchange,
32 32 extensions,
33 33 graphmod,
34 34 httppeer,
35 35 localrepo,
36 36 lock,
37 37 logcmdutil,
38 38 logexchange,
39 39 merge as mergemod,
40 40 mergestate as mergestatemod,
41 41 narrowspec,
42 42 phases,
43 43 requirements,
44 44 scmutil,
45 45 sshpeer,
46 46 statichttprepo,
47 47 ui as uimod,
48 48 unionrepo,
49 49 url,
50 50 util,
51 51 verify as verifymod,
52 52 vfs as vfsmod,
53 53 )
54 54 from .interfaces import repository as repositorymod
55 55 from .utils import (
56 56 hashutil,
57 57 stringutil,
58 58 urlutil,
59 59 )
60 60
61 61
62 62 release = lock.release
63 63
64 64 # shared features
65 65 sharedbookmarks = b'bookmarks'
66 66
67 67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve requested ``branches`` into concrete revisions.

    ``branches`` is a ``(hashbranch, branches)`` pair (as produced by URL
    parsing); ``revs`` is the explicitly requested revision list (may be
    None/empty). ``other`` provides the peer to query; ``lrepo`` is only
    needed to resolve the b'.' pseudo-branch from the dirstate.

    Returns a ``(revs, checkout)`` pair: the combined revision list and
    the revision to check out (or None).

    Raises ``error.Abort`` when the remote cannot look up branches, and
    ``error.RepoLookupError`` for an unknown branch name.
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch-related request: pass the explicit revs through
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        # no branchmap support: treat the hash/branch token as a revision
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # Append the heads of ``branch`` to the enclosing ``revs`` list.
        # Returns False when the branch is unknown to the remote.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            # reversed() so the first entry becomes the checkout target —
            # presumably newest head first; TODO confirm ordering contract
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a branch name after all: assume it is a revision/hash
            revs.append(hashbranch)
    return revs, revs[0]
110 110
111 111
112 112 def _isfile(path):
113 113 try:
114 114 # we use os.stat() directly here instead of os.path.isfile()
115 115 # because the latter started returning `False` on invalid path
116 116 # exceptions starting in 3.8 and we care about handling
117 117 # invalid paths specially here.
118 118 st = os.stat(path)
119 119 except ValueError as e:
120 120 msg = stringutil.forcebytestr(e)
121 121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
122 122 except OSError:
123 123 return False
124 124 else:
125 125 return stat.S_ISREG(st.st_mode)
126 126
127 127
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # bundle files are "files"; everything else local is a repo dir
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        expanded = util.expandpath(urlutil.urllocalpath(path))
        backend = bundlerepo if _isfile(expanded) else localrepo
        return backend.instance(ui, expanded, *args, **kwargs)
144 144
145 145
# URL scheme -> module/factory providing *repository* (directly openable)
# access for that scheme.
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL scheme -> module providing *peer* (wire-protocol) access for that
# scheme.
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
158 158
159 159
def _peerlookup(path):
    """Return the scheme handler (peer or repo module) for ``path``.

    Peer schemes take precedence over repo schemes; an unknown scheme
    falls back to ``LocalFactory``.
    """
    parsed = urlutil.url(path)
    scheme = parsed.scheme or b'file'
    for registry in (peer_schemes, repo_schemes):
        if scheme in registry:
            return registry[scheme]
    return LocalFactory
168 168
169 169
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        # a path: delegate the decision to the scheme handler
        cls = _peerlookup(repo)
        cls.instance  # make sure we load the module
        if util.safehasattr(cls, 'islocal'):
            return cls.islocal(repo)  # pytype: disable=module-attr
        return False
    # a repo/peer object: callers should use obj.local() directly
    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
    return repo.local()
180 180
181 181
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    parsed = urlutil.url(path, parsequery=False, parsefragment=False)
    if not parsed.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(parsed.localpath(), b'rb')
189 189
190 190
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []


def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run extension setup hooks on a freshly created repo or peer.

    ``presetupfuncs`` run first, then every loaded extension's
    ``reposetup`` hook (timed and logged). Wire peers (obj.local() is
    falsy) additionally get ``wirepeersetupfuncs`` applied.
    """
    # prefer the object's own ui (carries its config) when available
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b'  > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
213 213
214 214
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    factory = repo_schemes.get(scheme)
    if factory is None:
        # peer-only schemes cannot be opened as a local repository
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        factory = LocalFactory
    repo = factory.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    return repo.filtered(b'visible')
241 241
242 242
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    scheme = urlutil.url(path).scheme
    if scheme not in peer_schemes:
        # this is a repository: open it and return its peer wrapper
        repo = repository(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        return repo.peer()
    handler = peer_schemes[scheme]
    peer_obj = handler.instance(
        rui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(rui, peer_obj)
    return peer_obj
268 268
269 269
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    urlpath = urlutil.url(source).path
    if not urlpath:
        return b''
    # normpath strips a trailing slash so basename gets the last component
    return os.path.basename(os.path.normpath(urlpath))
290 290
291 291
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.

    The resolved source repository is cached on ``repo.srcrepo`` so
    repeated calls do not re-open it.
    """
    if repo.sharedpath == repo.path:
        return None

    # Attribute names must be str on Python 3: passing b'srcrepo' here made
    # getattr() (via safehasattr) raise TypeError instead of checking the
    # cache. Matches the str usage of safehasattr elsewhere in this module.
    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
309 309
310 310
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    """create a shared repository

    ``source`` may be a local repository object or a bytes path/URL to
    one; non-local sources abort. A repository sharing ``source``'s
    store is created at ``dest`` (default: ``defaultdest(source)``).
    Bookmarks are shared when ``bookmarks`` is True; ``relative`` stores
    the share pointer as a relative path; ``update`` triggers a working
    directory update afterwards.

    Returns the newly created shared repository object.
    """

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the new repo object picks up the shared configuration
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
362 362
363 363
def _prependsourcehgrc(repo):
    """Copy the share source's config and prepend it to this repo's
    .hg/hgrc on unshare.

    Only relevant when the share was performed with the share-safe
    method, where the source's config is shared with the shares.
    """
    source_vfs = vfsmod.vfs(repo.sharedpath)
    dest_vfs = vfsmod.vfs(repo.path)

    if not source_vfs.exists(b'hgrc'):
        return

    existing = b''
    if dest_vfs.exists(b'hgrc'):
        existing = dest_vfs.read(b'hgrc')

    with dest_vfs(b'hgrc', b'wb') as fp:
        fp.write(b"# Config copied from shared source\n")
        fp.write(source_vfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(existing)
384 384
385 385
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old share pointer around (renamed) rather than
            # deleting it outright
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
432 432
433 433
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        destrepo.vfs.write(
            b'hgrc', util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        )
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
450 450
451 451
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # first resolvable candidate wins: explicit checkout, then default,
    # then tip
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
472 472
473 473
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # hardlink starts unknown (None); util.copyfiles decides whether
        # hardlinking works and reports back. Note the progress topic is
        # chosen once up front, so it always reads 'copying'.
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # presumably a publishing repo need not carry phase roots
                # over — TODO confirm rationale
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        # caller is responsible for releasing destlock (if acquired)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
512 512
513 513
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns a ``(srcpeer, destpeer)`` pair like ``clone()``.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
619 619
620 620
621 621 # Recomputing caches is often slow on big repos, so copy them.
622 622 def _copycache(srcrepo, dstcachedir, fname):
623 623 """copy a cache from srcrepo to destcachedir (if it exists)"""
624 624 srcfname = srcrepo.cachevfs.join(fname)
625 625 dstfname = os.path.join(dstcachedir, fname)
626 626 if os.path.exists(srcfname):
627 627 if not os.path.exists(dstcachedir):
628 628 os.mkdir(dstcachedir)
629 629 util.copyfile(srcfname, dstfname)
630 630
631 631
632 632 def clone(
633 633 ui,
634 634 peeropts,
635 635 source,
636 636 dest=None,
637 637 pull=False,
638 638 revs=None,
639 639 update=True,
640 640 stream=False,
641 641 branch=None,
642 642 shareopts=None,
643 643 storeincludepats=None,
644 644 storeexcludepats=None,
645 645 depth=None,
646 646 ):
647 647 """Make a copy of an existing repository.
648 648
649 649 Create a copy of an existing repository in a new directory. The
650 650 source and destination are URLs, as passed to the repository
651 651 function. Returns a pair of repository peers, the source and
652 652 newly created destination.
653 653
654 654 The location of the source is added to the new repository's
655 655 .hg/hgrc file, as the default to be used for future pulls and
656 656 pushes.
657 657
658 658 If an exception is raised, the partly cloned/updated destination
659 659 repository will be deleted.
660 660
661 661 Arguments:
662 662
663 663 source: repository object or URL
664 664
665 665 dest: URL of destination repository to create (defaults to base
666 666 name of source repository)
667 667
668 668 pull: always pull from source repository, even in local case or if the
669 669 server prefers streaming
670 670
671 671 stream: stream raw data uncompressed from repository (fast over
672 672 LAN, slow over WAN)
673 673
674 674 revs: revision to clone up to (implies pull=True)
675 675
676 676 update: update working directory after clone completes, if
677 677 destination is local repository (True means update to default rev,
678 678 anything else is treated as a revision)
679 679
680 680 branch: branches to clone
681 681
682 682 shareopts: dict of options to control auto sharing behavior. The "pool" key
683 683 activates auto sharing mode and defines the directory for stores. The
684 684 "mode" key determines how to construct the directory name of the shared
685 685 repository. "identity" means the name is derived from the node of the first
686 686 changeset in the repository. "remote" means the name is derived from the
687 687 remote's path/URL. Defaults to "identity."
688 688
689 689 storeincludepats and storeexcludepats: sets of file patterns to include and
690 690 exclude in the repository copy, respectively. If not defined, all files
691 691 will be included (a "full" clone). Otherwise a "narrow" clone containing
692 692 only the requested files will be performed. If ``storeincludepats`` is not
693 693 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
694 694 ``path:.``. If both are empty sets, no files will be cloned.
695 695 """
696 696
697 697 if isinstance(source, bytes):
698 698 src = urlutil.get_clone_path(ui, source, branch)
699 699 origsource, source, branches = src
700 700 srcpeer = peer(ui, peeropts, source)
701 701 else:
702 702 srcpeer = source.peer() # in case we were called with a localrepo
703 703 branches = (None, branch or [])
704 704 origsource = source = srcpeer.url()
705 705 srclock = destlock = destwlock = cleandir = None
706 706 destpeer = None
707 707 try:
708 708 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
709 709
710 710 if dest is None:
711 711 dest = defaultdest(source)
712 712 if dest:
713 713 ui.status(_(b"destination directory: %s\n") % dest)
714 714 else:
715 715 dest = urlutil.get_clone_path(ui, dest)[0]
716 716
717 717 dest = urlutil.urllocalpath(dest)
718 718 source = urlutil.urllocalpath(source)
719 719
720 720 if not dest:
721 721 raise error.InputError(_(b"empty destination path is not valid"))
722 722
723 723 destvfs = vfsmod.vfs(dest, expandpath=True)
724 724 if destvfs.lexists():
725 725 if not destvfs.isdir():
726 726 raise error.InputError(
727 727 _(b"destination '%s' already exists") % dest
728 728 )
729 729 elif destvfs.listdir():
730 730 raise error.InputError(
731 731 _(b"destination '%s' is not empty") % dest
732 732 )
733 733
734 734 createopts = {}
735 735 narrow = False
736 736
737 737 if storeincludepats is not None:
738 738 narrowspec.validatepatterns(storeincludepats)
739 739 narrow = True
740 740
741 741 if storeexcludepats is not None:
742 742 narrowspec.validatepatterns(storeexcludepats)
743 743 narrow = True
744 744
745 745 if narrow:
746 746 # Include everything by default if only exclusion patterns defined.
747 747 if storeexcludepats and not storeincludepats:
748 748 storeincludepats = {b'path:.'}
749 749
750 750 createopts[b'narrowfiles'] = True
751 751
752 752 if depth:
753 753 createopts[b'shallowfilestore'] = True
754 754
755 755 if srcpeer.capable(b'lfs-serve'):
756 756 # Repository creation honors the config if it disabled the extension, so
757 757 # we can't just announce that lfs will be enabled. This check avoids
758 758 # saying that lfs will be enabled, and then saying it's an unknown
759 759 # feature. The lfs creation option is set in either case so that a
760 760 # requirement is added. If the extension is explicitly disabled but the
761 761 # requirement is set, the clone aborts early, before transferring any
762 762 # data.
763 763 createopts[b'lfs'] = True
764 764
765 765 if extensions.disabled_help(b'lfs'):
766 766 ui.status(
767 767 _(
768 768 b'(remote is using large file support (lfs), but it is '
769 769 b'explicitly disabled in the local configuration)\n'
770 770 )
771 771 )
772 772 else:
773 773 ui.status(
774 774 _(
775 775 b'(remote is using large file support (lfs); lfs will '
776 776 b'be enabled for this repository)\n'
777 777 )
778 778 )
779 779
780 780 shareopts = shareopts or {}
781 781 sharepool = shareopts.get(b'pool')
782 782 sharenamemode = shareopts.get(b'mode')
783 783 if sharepool and islocal(dest):
784 784 sharepath = None
785 785 if sharenamemode == b'identity':
786 786 # Resolve the name from the initial changeset in the remote
787 787 # repository. This returns nullid when the remote is empty. It
788 788 # raises RepoLookupError if revision 0 is filtered or otherwise
789 789 # not available. If we fail to resolve, sharing is not enabled.
790 790 try:
791 791 with srcpeer.commandexecutor() as e:
792 792 rootnode = e.callcommand(
793 793 b'lookup',
794 794 {
795 795 b'key': b'0',
796 796 },
797 797 ).result()
798 798
799 799 if rootnode != sha1nodeconstants.nullid:
800 800 sharepath = os.path.join(sharepool, hex(rootnode))
801 801 else:
802 802 ui.status(
803 803 _(
804 804 b'(not using pooled storage: '
805 805 b'remote appears to be empty)\n'
806 806 )
807 807 )
808 808 except error.RepoLookupError:
809 809 ui.status(
810 810 _(
811 811 b'(not using pooled storage: '
812 812 b'unable to resolve identity of remote)\n'
813 813 )
814 814 )
815 815 elif sharenamemode == b'remote':
816 816 sharepath = os.path.join(
817 817 sharepool, hex(hashutil.sha1(source).digest())
818 818 )
819 819 else:
820 820 raise error.Abort(
821 821 _(b'unknown share naming mode: %s') % sharenamemode
822 822 )
823 823
824 824 # TODO this is a somewhat arbitrary restriction.
825 825 if narrow:
826 826 ui.status(
827 827 _(b'(pooled storage not supported for narrow clones)\n')
828 828 )
829 829 sharepath = None
830 830
831 831 if sharepath:
832 832 return clonewithshare(
833 833 ui,
834 834 peeropts,
835 835 sharepath,
836 836 source,
837 837 srcpeer,
838 838 dest,
839 839 pull=pull,
840 840 rev=revs,
841 841 update=update,
842 842 stream=stream,
843 843 )
844 844
845 845 srcrepo = srcpeer.local()
846 846
847 847 abspath = origsource
848 848 if islocal(origsource):
849 849 abspath = util.abspath(urlutil.urllocalpath(origsource))
850 850
851 851 if islocal(dest):
852 852 if os.path.exists(dest):
853 853 # only clean up directories we create ourselves
854 854 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
855 855 cleandir = hgdir
856 856 else:
857 857 cleandir = dest
858 858
859 859 copy = False
860 860 if (
861 861 srcrepo
862 862 and srcrepo.cancopy()
863 863 and islocal(dest)
864 864 and not phases.hassecret(srcrepo)
865 865 ):
866 866 copy = not pull and not revs
867 867
868 868 # TODO this is a somewhat arbitrary restriction.
869 869 if narrow:
870 870 copy = False
871 871
872 872 if copy:
873 873 try:
874 874 # we use a lock here because if we race with commit, we
875 875 # can end up with extra data in the cloned revlogs that's
876 876 # not pointed to by changesets, thus causing verify to
877 877 # fail
878 878 srclock = srcrepo.lock(wait=False)
879 879 except error.LockError:
880 880 copy = False
881 881
882 882 if copy:
883 883 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
884 884
885 885 destrootpath = urlutil.urllocalpath(dest)
886 886 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
887 887 localrepo.createrepository(
888 888 ui,
889 889 destrootpath,
890 890 requirements=dest_reqs,
891 891 )
892 892 destrepo = localrepo.makelocalrepository(ui, destrootpath)
893 893
894 894 destwlock = destrepo.wlock()
895 895 destlock = destrepo.lock()
896 896 from . import streamclone # avoid cycle
897 897
898 898 streamclone.local_copy(srcrepo, destrepo)
899 899
900 900 # we need to re-init the repo after manually copying the data
901 901 # into it
902 902 destpeer = peer(srcrepo, peeropts, dest)
903 903
904 904 # make the peer aware that is it already locked
905 905 #
906 906 # important:
907 907 #
908 908 # We still need to release that lock at the end of the function
909 909 destpeer.local()._lockref = weakref.ref(destlock)
910 910 destpeer.local()._wlockref = weakref.ref(destwlock)
911 911 # dirstate also needs to be copied because `_wlockref` has a reference
912 912 # to it: this dirstate is saved to disk when the wlock is released
913 913 destpeer.local().dirstate = destrepo.dirstate
914 914
915 915 srcrepo.hook(
916 916 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
917 917 )
918 918 else:
919 919 try:
920 920 # only pass ui when no srcrepo
921 921 destpeer = peer(
922 922 srcrepo or ui,
923 923 peeropts,
924 924 dest,
925 925 create=True,
926 926 createopts=createopts,
927 927 )
928 928 except FileExistsError:
929 929 cleandir = None
930 930 raise error.Abort(_(b"destination '%s' already exists") % dest)
931 931
932 932 if revs:
933 933 if not srcpeer.capable(b'lookup'):
934 934 raise error.Abort(
935 935 _(
936 936 b"src repository does not support "
937 937 b"revision lookup and so doesn't "
938 938 b"support clone by revision"
939 939 )
940 940 )
941 941
942 942 # TODO this is batchable.
943 943 remoterevs = []
944 944 for rev in revs:
945 945 with srcpeer.commandexecutor() as e:
946 946 remoterevs.append(
947 947 e.callcommand(
948 948 b'lookup',
949 949 {
950 950 b'key': rev,
951 951 },
952 952 ).result()
953 953 )
954 954 revs = remoterevs
955 955
956 956 checkout = revs[0]
957 957 else:
958 958 revs = None
959 959 local = destpeer.local()
960 960 if local:
961 961 if narrow:
962 962 with local.wlock(), local.lock():
963 963 local.setnarrowpats(storeincludepats, storeexcludepats)
964 964 narrowspec.copytoworkingcopy(local)
965 965
966 966 u = urlutil.url(abspath)
967 967 defaulturl = bytes(u)
968 968 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
969 969 if not stream:
970 970 if pull:
971 971 stream = False
972 972 else:
973 973 stream = None
974 974 # internal config: ui.quietbookmarkmove
975 975 overrides = {(b'ui', b'quietbookmarkmove'): True}
976 976 with local.ui.configoverride(overrides, b'clone'):
977 977 exchange.pull(
978 978 local,
979 979 srcpeer,
980 980 heads=revs,
981 981 streamclonerequested=stream,
982 982 includepats=storeincludepats,
983 983 excludepats=storeexcludepats,
984 984 depth=depth,
985 985 )
986 986 elif srcrepo:
987 987 # TODO lift restriction once exchange.push() accepts narrow
988 988 # push.
989 989 if narrow:
990 990 raise error.Abort(
991 991 _(
992 992 b'narrow clone not available for '
993 993 b'remote destinations'
994 994 )
995 995 )
996 996
997 997 exchange.push(
998 998 srcrepo,
999 999 destpeer,
1000 1000 revs=revs,
1001 1001 bookmarks=srcrepo._bookmarks.keys(),
1002 1002 )
1003 1003 else:
1004 1004 raise error.Abort(
1005 1005 _(b"clone from remote to remote not supported")
1006 1006 )
1007 1007
1008 1008 cleandir = None
1009 1009
1010 1010 destrepo = destpeer.local()
1011 1011 if destrepo:
1012 1012 template = uimod.samplehgrcs[b'cloned']
1013 1013 u = urlutil.url(abspath)
1014 1014 u.passwd = None
1015 1015 defaulturl = bytes(u)
1016 1016 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1017 1017 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1018 1018
1019 1019 if ui.configbool(b'experimental', b'remotenames'):
1020 1020 logexchange.pullremotenames(destrepo, srcpeer)
1021 1021
1022 1022 if update:
1023 1023 if update is not True:
1024 1024 with srcpeer.commandexecutor() as e:
1025 1025 checkout = e.callcommand(
1026 1026 b'lookup',
1027 1027 {
1028 1028 b'key': update,
1029 1029 },
1030 1030 ).result()
1031 1031
1032 1032 uprev = None
1033 1033 status = None
1034 1034 if checkout is not None:
1035 1035 # Some extensions (at least hg-git and hg-subversion) have
1036 1036 # a peer.lookup() implementation that returns a name instead
1037 1037 # of a nodeid. We work around it here until we've figured
1038 1038 # out a better solution.
1039 1039 if len(checkout) == 20 and checkout in destrepo:
1040 1040 uprev = checkout
1041 1041 elif scmutil.isrevsymbol(destrepo, checkout):
1042 1042 uprev = scmutil.revsymbol(destrepo, checkout).node()
1043 1043 else:
1044 1044 if update is not True:
1045 1045 try:
1046 1046 uprev = destrepo.lookup(update)
1047 1047 except error.RepoLookupError:
1048 1048 pass
1049 1049 if uprev is None:
1050 1050 try:
1051 1051 if destrepo._activebookmark:
1052 1052 uprev = destrepo.lookup(destrepo._activebookmark)
1053 1053 update = destrepo._activebookmark
1054 1054 else:
1055 1055 uprev = destrepo._bookmarks[b'@']
1056 1056 update = b'@'
1057 1057 bn = destrepo[uprev].branch()
1058 1058 if bn == b'default':
1059 1059 status = _(b"updating to bookmark %s\n" % update)
1060 1060 else:
1061 1061 status = (
1062 1062 _(b"updating to bookmark %s on branch %s\n")
1063 1063 ) % (update, bn)
1064 1064 except KeyError:
1065 1065 try:
1066 1066 uprev = destrepo.branchtip(b'default')
1067 1067 except error.RepoLookupError:
1068 1068 uprev = destrepo.lookup(b'tip')
1069 1069 if not status:
1070 1070 bn = destrepo[uprev].branch()
1071 1071 status = _(b"updating to branch %s\n") % bn
1072 1072 destrepo.ui.status(status)
1073 1073 _update(destrepo, uprev)
1074 1074 if update in destrepo._bookmarks:
1075 1075 bookmarks.activate(destrepo, update)
1076 1076 if destlock is not None:
1077 1077 release(destlock)
1078 1078 if destwlock is not None:
1079 1079 release(destlock)
1080 1080 # here is a tiny windows were someone could end up writing the
1081 1081 # repository before the cache are sure to be warm. This is "fine"
1082 1082 # as the only "bad" outcome would be some slowness. That potential
1083 1083 # slowness already affect reader.
1084 1084 with destrepo.lock():
1085 1085 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1086 1086 finally:
1087 1087 release(srclock, destlock, destwlock)
1088 1088 if cleandir is not None:
1089 1089 shutil.rmtree(cleandir, True)
1090 1090 if srcpeer is not None:
1091 1091 srcpeer.close()
1092 1092 if destpeer and destpeer.local() is None:
1093 1093 destpeer.close()
1094 1094 return srcpeer, destpeer
1095 1095
1096 1096
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge *stats* to the repo ui.

    When ``quietempty`` is true and *stats* records no activity at all,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    template = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(template % counts)
1112 1112
1113 1113
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory of *repo* to *node*.

    Deprecated entry point: emits a deprecation warning steering callers
    towards merge.update() / merge.clean_update().  When ``overwrite`` is
    set, local changes are clobbered; otherwise they are merged into the
    destination.

    Returns stats (see pydoc mercurial.merge.applyupdates).
    """
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1132 1132
1133 1133
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to *node*.

    Returns True when unresolved file merges remain, False otherwise.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0


# naming conflict in clone()
_update = update
1145 1145
1146 1146
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to *node*, clobbering changes.

    A clean update can never leave unresolved files behind, so this always
    returns False ("no conflicts").
    """
    stats = mergemod.clean_update(repo[node])
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False


# naming conflict in updatetotally()
_clean = clean
1158 1158
# The closed set of values accepted for the "commands.update.check" config
# knob and the ``updatecheck`` argument of updatetotally().
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1165 1165
1166 1166
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # not configured, or an invalid value configured: use the default
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and remember
            # to warn about other plausible destinations afterwards)
            destinfo = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = destinfo
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                mark = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % mark)
            else:
                # this can happen with a non-linear update
                mark = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % mark)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                mark = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % mark)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                mark = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % mark)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1245 1245
1246 1246
def merge(ctx, force=False, remind=True, labels=None):
    """Branch merge *ctx* into the working directory, resolving changes.

    Returns True when unresolved conflicts remain, False otherwise.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved
1268 1268
1269 1269
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the pre-merge parent."""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # the merge left conflicts behind: go back to the local side
        node = ms.localctx.hex()
    else:
        # there were no conflicts, so no mergestate was stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update can never leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1283 1283
1284 1284
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    srcs = list(urlutil.get_pull_paths(repo, ui, [source]))
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            # graft the relative subpath onto the source URL
            u = urlutil.url(source)
            normpath = os.path.normpath if u.islocal() else posixpath.normpath
            u.path = normpath(b'%s/%s' % (u.path, subpath))
            source = bytes(u)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1346 1346
1347 1347
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets in *source* not present locally; return exit code."""

    def subreporecurse():
        # recurse into subrepositories when --subrepos was given
        ret = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for subpath in sorted(wctx.substate):
                sub = wctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1377 1377
1378 1378
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the changesets missing from each destination in *dests*.

    Returns ``(outgoing_revs, others)`` where ``outgoing_revs`` is the
    union of missing nodes across all destinations, sorted by local
    revision number, and ``others`` is the list of still-open peers that
    were contacted.  The caller is responsible for closing the peers.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        # get_push_paths already hands out the push variant of each path,
        # so `loc` is the right location to contact (no pushloc fallback
        # needed here).
        dest = path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                # graft the relative subpath onto the destination URL
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1421 1421
1422 1422
1423 1423 def _outgoing_recurse(ui, repo, dests, opts):
1424 1424 ret = 1
1425 1425 if opts.get(b'subrepos'):
1426 1426 ctx = repo[None]
1427 1427 for subpath in sorted(ctx.substate):
1428 1428 sub = ctx.sub(subpath)
1429 1429 ret = min(ret, sub.outgoing(ui, dests, opts))
1430 1430 return ret
1431 1431
1432 1432
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing

    Honors --newest-first (reverses *revs* in place), --limit and
    --no-merges, yielding the surviving nodes.
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: fast path
        yield from revs
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1454 1454
1455 1455
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the destination(s); return exit code.

    Exit code is 0 when outgoing changes were found, 1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    missing, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if missing:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, missing, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for node in _outgoing_filter(repo, missing, opts):
                    displayer.show(repo[node])
                displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, missing)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret
    finally:
        # peers opened by _outgoing() are ours to close
        for oth in others:
            oth.close()
1487 1487
1488 1488
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies subrepositories referenced from .hgsubstate in visible
    changesets.  Returns the combined (nonzero on failure) exit code.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        sub = ctx.sub(subpath, allowcreate=False)
                        ret = sub.verify() or ret
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # an unreadable .hgsubstate should not abort the whole verify
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1521 1521
1522 1522
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # copy bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1551 1551
1552 1552
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.  Each entry is (repo attribute holding the directory, file
# name inside it).
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1562 1562
1563 1563
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.
        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = state
        self.mtime = mtime
        return self._repo, True

    def _repostate(self):
        """Snapshot the files of interest.

        Returns ``(state, maxmtime)`` where *state* is a tuple of
        (mtime, size) pairs for each entry of ``foi`` and *maxmtime* is
        the newest mtime seen (-1 when nothing could be stat'ed newer).
        """
        entries = []
        newest = -1
        for attrname, fname in foi:
            directory = getattr(self._repo, attrname)
            target = os.path.join(directory, fname)
            try:
                st = os.stat(target)
            except OSError:
                # the file may not exist yet; fall back to the directory
                st = os.stat(directory)
            mtime = st[stat.ST_MTIME]
            entries.append((mtime, st.st_size))
            newest = max(newest, mtime)
        return tuple(entries), newest

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        clone = cachedlocalrepo(repo)
        clone._state = self._state
        clone.mtime = self.mtime
        return clone
General Comments 0
You need to be logged in to leave comments. Login now