##// END OF EJS Templates
clone: explicitly detect the need to fetch a peer...
marmoute -
r50642:8a38cd76 default
parent child Browse files
Show More
@@ -1,1664 +1,1669
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import os
11 11 import posixpath
12 12 import shutil
13 13 import stat
14 14 import weakref
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 sha1nodeconstants,
20 20 short,
21 21 )
22 22 from .pycompat import getattr
23 23
24 24 from . import (
25 25 bookmarks,
26 26 bundlerepo,
27 27 cmdutil,
28 28 destutil,
29 29 discovery,
30 30 error,
31 31 exchange,
32 32 extensions,
33 33 graphmod,
34 34 httppeer,
35 35 localrepo,
36 36 lock,
37 37 logcmdutil,
38 38 logexchange,
39 39 merge as mergemod,
40 40 mergestate as mergestatemod,
41 41 narrowspec,
42 42 phases,
43 43 requirements,
44 44 scmutil,
45 45 sshpeer,
46 46 statichttprepo,
47 47 ui as uimod,
48 48 unionrepo,
49 49 url,
50 50 util,
51 51 verify as verifymod,
52 52 vfs as vfsmod,
53 53 )
54 54 from .interfaces import repository as repositorymod
55 55 from .utils import (
56 56 hashutil,
57 57 stringutil,
58 58 urlutil,
59 59 )
60 60
61 61
# convenience alias so call sites can release locks without importing `lock`
release = lock.release

# shared features: values that may appear in a share's `shareditems` set
# (see share()); currently only bookmark sharing is supported here
sharedbookmarks = b'bookmarks'
66 66
67 67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve requested branches into revision identifiers.

    ``lrepo`` is the local repository (used only to resolve the b'.'
    branch via its dirstate); ``other`` is a peer, or a localrepo whose
    peer will be used.  ``branches`` is a ``(hashbranch, branches)``
    pair; ``revs`` is an optional list of extra revisions.

    Returns a ``(revs, checkout)`` pair: the resolved revision list (or
    None) and the revision to check out (or None).
    """
    if util.safehasattr(other, 'peer'):
        # a courtesy to callers using a localrepo for other
        peer = other.peer()
    else:
        peer = other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related requested: pass revisions through
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # without branchmap support, the branch name can only be used
        # as a plain revision identifier
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # expand a branch name into its heads; False if branch unknown
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a branch name after all: treat it as a revision
            revs.append(hashbranch)
    return revs, revs[0]
114 114
115 115
def _isfile(path):
    """Return True if ``path`` names an existing regular file.

    os.stat() is used directly instead of os.path.isfile() because the
    latter started returning ``False`` on invalid-path exceptions in
    Python 3.8, and invalid paths must be reported explicitly here.
    """
    try:
        mode = os.stat(path).st_mode
    except OSError:
        # missing path, permission problem, dangling symlink, ...
        return False
    except ValueError as exc:
        detail = stringutil.forcebytestr(exc)
        raise error.Abort(_(b'invalid path %s: %s') % (path, detail))
    return stat.S_ISREG(mode)
130 130
131 131
class LocalFactory:
    """Thin dispatcher between localrepo and bundlerepo.

    A path resolving to a regular file is handled by bundlerepo;
    anything else is handled by localrepo.
    """

    @staticmethod
    def islocal(path: bytes) -> bool:
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        expanded = util.expandpath(urlutil.urllocalpath(path))
        module = bundlerepo if _isfile(expanded) else localrepo
        return module.instance(ui, expanded, *args, **kwargs)
148 148
149 149
# URL scheme -> module (or factory) providing repository objects
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL scheme -> module providing peer objects for remote access
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
162 162
163 163
def _peerlookup(path):
    """Return the module/factory handling the URL scheme of ``path``.

    Peer schemes win over repository schemes; anything unrecognized
    falls back to LocalFactory.
    """
    scheme = urlutil.url(path).scheme or b'file'
    handler = peer_schemes.get(scheme)
    if handler is None:
        handler = repo_schemes.get(scheme, LocalFactory)
    return handler
172 172
173 173
def islocal(repo):
    """Return true if repo (or path pointing to repo) is local."""
    if not isinstance(repo, bytes):
        # passing an object is deprecated; callers should use obj.local()
        repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
        return repo.local()
    handler = _peerlookup(repo)
    handler.instance  # make sure we load the module
    if not util.safehasattr(handler, 'islocal'):
        return False
    return handler.islocal(repo)  # pytype: disable=module-attr
184 184
185 185
def openpath(ui, path, sendaccept=True):
    """Open ``path`` with open() if local, url.open() if remote."""
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
193 193
194 194
# a list of (ui, repo) functions called for wire peer initialization
# (run by _setup_repo_or_peer() on objects whose .local() is falsy)
wirepeersetupfuncs = []
197 197
198 198
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run setup callbacks and extension reposetup hooks on ``obj``.

    ``obj`` is a freshly created repository or peer.  ``presetupfuncs``
    are run first; then each loaded extension's ``reposetup`` hook is
    invoked (with timing logged); finally, for non-local objects, the
    registered ``wirepeersetupfuncs`` are applied.
    """
    # prefer the object's own ui if it has one (it may carry extra config)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
217 217
218 218
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """Return a repository object for the specified path."""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    factory = repo_schemes.get(scheme)
    if factory is None:
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        factory = LocalFactory
    repo = factory.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    # hand back the 'visible' filtered view of the repository
    return repo.filtered(b'visible')
245 245
246 246
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    """Return a repository peer for the specified path.

    ``uiorrepo`` is a ui or repository object passed to remoteui();
    ``opts`` are remote options.  ``path`` may be a plain path/URL or a
    urlutil.path object.  Schemes without a dedicated peer class are
    opened as local repositories and wrapped via repo.peer().
    """
    rui = remoteui(uiorrepo, opts)
    if util.safehasattr(path, 'url'):
        # this is a urlutil.path object
        scheme = path.url.scheme  # pytype: disable=attribute-error
        # XXX for now we don't do anything more than that
        path = path.loc  # pytype: disable=attribute-error
    else:
        scheme = urlutil.url(path).scheme
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
        peer = cls.instance(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo = repository(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        peer = repo.peer()
    return peer
278 278
279 279
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # last path component of the normalized path, or empty when pathless
    return os.path.basename(os.path.normpath(path)) if path else b''
300 300
301 301
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.

    The resolved source repository is cached on ``repo.srcrepo`` so
    repeated calls do not reopen it.
    """
    if repo.sharedpath == repo.path:
        # not a share
        return None

    # Bug fix: the attribute name must be a str — util.safehasattr was
    # previously called with b'srcrepo' (bytes), which makes getattr()
    # raise TypeError on Python 3, defeating the cache fast path.
    cached = getattr(repo, 'srcrepo', None)
    if cached:
        return cached

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
319 319
320 320
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    """Create a shared repository at ``dest`` backed by ``source``'s store.

    ``source`` may be a path/URL (bytes) or a repository object, and must
    be local.  ``update`` may be a boolean or a revision to check out.
    ``bookmarks`` controls whether bookmarks are shared; ``relative``
    stores the share pointer as a relative path.  Returns the new
    repository object.
    """

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # NOTE(review): the destination is reopened after postshare() —
    # presumably to pick up the configuration postshare just wrote; confirm
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
373 373
374 374
def _prependsourcehgrc(repo):
    """Copy the share source's config in front of this repo's .hg/hgrc.

    Used on unshare, and only relevant when the share was performed with
    the share-safe method, where the source's config is shared.
    """
    source_vfs = vfsmod.vfs(repo.sharedpath)
    local_vfs = vfsmod.vfs(repo.path)

    if not source_vfs.exists(b'hgrc'):
        # source has no config: nothing to prepend
        return

    existing = local_vfs.read(b'hgrc') if local_vfs.exists(b'hgrc') else b''

    with local_vfs(b'hgrc', b'wb') as fp:
        fp.write(b"# Config copied from shared source\n")
        fp.write(source_vfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(existing)
395 395
396 396
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # retire the pointer to the share source, keeping a backup
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            # drop the share-related requirements and persist the change
            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the stale repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
443 443
444 444
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default_path = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default_path:
        hgrc = util.tonativeeol(b'[paths]\ndefault = %s\n' % default_path)
        destrepo.vfs.write(b'hgrc', hgrc)
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow shares need the narrow patterns mirrored into the
        # destination's working copy
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
461 461
462 462
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # a specific revision was requested
        checkout = update
    candidates = [c for c in (checkout, b'default', b'tip') if c is not None]
    for candidate in candidates:
        try:
            uprev = repo.lookup(candidate)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
483 483
484 484
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        # hardlink starts unknown (None), so the progress topic is always
        # b'copying' here; util.copyfiles() later reports whether
        # hardlinking was actually used
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # phaseroots of a publishing source is deliberately skipped
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        # the caller is responsible for releasing destlock
        return destlock
    except:  # re-raises
        release(destlock)
        raise
523 523
524 524
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    NOTE(review): the ``pull`` parameter is accepted but never read in this
    body — the pooled clone below always forces pull mode; it appears to be
    kept for signature symmetry with clone().
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
630 630
631 631
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    source = srcrepo.cachevfs.join(fname)
    if not os.path.exists(source):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(source, os.path.join(dstcachedir, fname))
641 641
642 642
643 643 def clone(
644 644 ui,
645 645 peeropts,
646 646 source,
647 647 dest=None,
648 648 pull=False,
649 649 revs=None,
650 650 update=True,
651 651 stream=False,
652 652 branch=None,
653 653 shareopts=None,
654 654 storeincludepats=None,
655 655 storeexcludepats=None,
656 656 depth=None,
657 657 ):
658 658 """Make a copy of an existing repository.
659 659
660 660 Create a copy of an existing repository in a new directory. The
661 661 source and destination are URLs, as passed to the repository
662 662 function. Returns a pair of repository peers, the source and
663 663 newly created destination.
664 664
665 665 The location of the source is added to the new repository's
666 666 .hg/hgrc file, as the default to be used for future pulls and
667 667 pushes.
668 668
669 669 If an exception is raised, the partly cloned/updated destination
670 670 repository will be deleted.
671 671
672 672 Arguments:
673 673
674 674 source: repository object or URL
675 675
676 676 dest: URL of destination repository to create (defaults to base
677 677 name of source repository)
678 678
679 679 pull: always pull from source repository, even in local case or if the
680 680 server prefers streaming
681 681
682 682 stream: stream raw data uncompressed from repository (fast over
683 683 LAN, slow over WAN)
684 684
685 685 revs: revision to clone up to (implies pull=True)
686 686
687 687 update: update working directory after clone completes, if
688 688 destination is local repository (True means update to default rev,
689 689 anything else is treated as a revision)
690 690
691 691 branch: branches to clone
692 692
693 693 shareopts: dict of options to control auto sharing behavior. The "pool" key
694 694 activates auto sharing mode and defines the directory for stores. The
695 695 "mode" key determines how to construct the directory name of the shared
696 696 repository. "identity" means the name is derived from the node of the first
697 697 changeset in the repository. "remote" means the name is derived from the
698 698 remote's path/URL. Defaults to "identity."
699 699
700 700 storeincludepats and storeexcludepats: sets of file patterns to include and
701 701 exclude in the repository copy, respectively. If not defined, all files
702 702 will be included (a "full" clone). Otherwise a "narrow" clone containing
703 703 only the requested files will be performed. If ``storeincludepats`` is not
704 704 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
705 705 ``path:.``. If both are empty sets, no files will be cloned.
706 706 """
707 707
708 708 if isinstance(source, bytes):
709 709 src_path = urlutil.get_clone_path_obj(ui, source)
710 710 if src_path is None:
711 711 srcpeer = peer(ui, peeropts, b'')
712 712 origsource = source = b''
713 713 branches = (None, branch or [])
714 714 else:
715 715 srcpeer = peer(ui, peeropts, src_path)
716 716 origsource = src_path.rawloc
717 717 branches = (src_path.branch, branch or [])
718 718 source = src_path.loc
719 719 else:
720 if util.safehasattr(source, 'peer'):
721 srcpeer = source.peer() # in case we were called with a localrepo
722 else:
723 srcpeer = source
724 branches = (None, branch or [])
720 725 # XXX path: simply use the peer `path` object when this become available
721 726 srcpeer = source.peer() # in case we were called with a localrepo
722 727 branches = (None, branch or [])
723 728 origsource = source = srcpeer.url()
724 729 srclock = destlock = destwlock = cleandir = None
725 730 destpeer = None
726 731 try:
727 732 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
728 733
729 734 if dest is None:
730 735 dest = defaultdest(source)
731 736 if dest:
732 737 ui.status(_(b"destination directory: %s\n") % dest)
733 738 else:
734 739 dest_path = urlutil.get_clone_path_obj(ui, dest)
735 740 if dest_path is not None:
736 741 dest = dest_path.rawloc
737 742 else:
738 743 dest = b''
739 744
740 745 dest = urlutil.urllocalpath(dest)
741 746 source = urlutil.urllocalpath(source)
742 747
743 748 if not dest:
744 749 raise error.InputError(_(b"empty destination path is not valid"))
745 750
746 751 destvfs = vfsmod.vfs(dest, expandpath=True)
747 752 if destvfs.lexists():
748 753 if not destvfs.isdir():
749 754 raise error.InputError(
750 755 _(b"destination '%s' already exists") % dest
751 756 )
752 757 elif destvfs.listdir():
753 758 raise error.InputError(
754 759 _(b"destination '%s' is not empty") % dest
755 760 )
756 761
757 762 createopts = {}
758 763 narrow = False
759 764
760 765 if storeincludepats is not None:
761 766 narrowspec.validatepatterns(storeincludepats)
762 767 narrow = True
763 768
764 769 if storeexcludepats is not None:
765 770 narrowspec.validatepatterns(storeexcludepats)
766 771 narrow = True
767 772
768 773 if narrow:
769 774 # Include everything by default if only exclusion patterns defined.
770 775 if storeexcludepats and not storeincludepats:
771 776 storeincludepats = {b'path:.'}
772 777
773 778 createopts[b'narrowfiles'] = True
774 779
775 780 if depth:
776 781 createopts[b'shallowfilestore'] = True
777 782
778 783 if srcpeer.capable(b'lfs-serve'):
779 784 # Repository creation honors the config if it disabled the extension, so
780 785 # we can't just announce that lfs will be enabled. This check avoids
781 786 # saying that lfs will be enabled, and then saying it's an unknown
782 787 # feature. The lfs creation option is set in either case so that a
783 788 # requirement is added. If the extension is explicitly disabled but the
784 789 # requirement is set, the clone aborts early, before transferring any
785 790 # data.
786 791 createopts[b'lfs'] = True
787 792
788 793 if extensions.disabled_help(b'lfs'):
789 794 ui.status(
790 795 _(
791 796 b'(remote is using large file support (lfs), but it is '
792 797 b'explicitly disabled in the local configuration)\n'
793 798 )
794 799 )
795 800 else:
796 801 ui.status(
797 802 _(
798 803 b'(remote is using large file support (lfs); lfs will '
799 804 b'be enabled for this repository)\n'
800 805 )
801 806 )
802 807
803 808 shareopts = shareopts or {}
804 809 sharepool = shareopts.get(b'pool')
805 810 sharenamemode = shareopts.get(b'mode')
806 811 if sharepool and islocal(dest):
807 812 sharepath = None
808 813 if sharenamemode == b'identity':
809 814 # Resolve the name from the initial changeset in the remote
810 815 # repository. This returns nullid when the remote is empty. It
811 816 # raises RepoLookupError if revision 0 is filtered or otherwise
812 817 # not available. If we fail to resolve, sharing is not enabled.
813 818 try:
814 819 with srcpeer.commandexecutor() as e:
815 820 rootnode = e.callcommand(
816 821 b'lookup',
817 822 {
818 823 b'key': b'0',
819 824 },
820 825 ).result()
821 826
822 827 if rootnode != sha1nodeconstants.nullid:
823 828 sharepath = os.path.join(sharepool, hex(rootnode))
824 829 else:
825 830 ui.status(
826 831 _(
827 832 b'(not using pooled storage: '
828 833 b'remote appears to be empty)\n'
829 834 )
830 835 )
831 836 except error.RepoLookupError:
832 837 ui.status(
833 838 _(
834 839 b'(not using pooled storage: '
835 840 b'unable to resolve identity of remote)\n'
836 841 )
837 842 )
838 843 elif sharenamemode == b'remote':
839 844 sharepath = os.path.join(
840 845 sharepool, hex(hashutil.sha1(source).digest())
841 846 )
842 847 else:
843 848 raise error.Abort(
844 849 _(b'unknown share naming mode: %s') % sharenamemode
845 850 )
846 851
847 852 # TODO this is a somewhat arbitrary restriction.
848 853 if narrow:
849 854 ui.status(
850 855 _(b'(pooled storage not supported for narrow clones)\n')
851 856 )
852 857 sharepath = None
853 858
854 859 if sharepath:
855 860 return clonewithshare(
856 861 ui,
857 862 peeropts,
858 863 sharepath,
859 864 source,
860 865 srcpeer,
861 866 dest,
862 867 pull=pull,
863 868 rev=revs,
864 869 update=update,
865 870 stream=stream,
866 871 )
867 872
868 873 srcrepo = srcpeer.local()
869 874
870 875 abspath = origsource
871 876 if islocal(origsource):
872 877 abspath = util.abspath(urlutil.urllocalpath(origsource))
873 878
874 879 if islocal(dest):
875 880 if os.path.exists(dest):
876 881 # only clean up directories we create ourselves
877 882 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
878 883 cleandir = hgdir
879 884 else:
880 885 cleandir = dest
881 886
882 887 copy = False
883 888 if (
884 889 srcrepo
885 890 and srcrepo.cancopy()
886 891 and islocal(dest)
887 892 and not phases.hassecret(srcrepo)
888 893 ):
889 894 copy = not pull and not revs
890 895
891 896 # TODO this is a somewhat arbitrary restriction.
892 897 if narrow:
893 898 copy = False
894 899
895 900 if copy:
896 901 try:
897 902 # we use a lock here because if we race with commit, we
898 903 # can end up with extra data in the cloned revlogs that's
899 904 # not pointed to by changesets, thus causing verify to
900 905 # fail
901 906 srclock = srcrepo.lock(wait=False)
902 907 except error.LockError:
903 908 copy = False
904 909
905 910 if copy:
906 911 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
907 912
908 913 destrootpath = urlutil.urllocalpath(dest)
909 914 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
910 915 localrepo.createrepository(
911 916 ui,
912 917 destrootpath,
913 918 requirements=dest_reqs,
914 919 )
915 920 destrepo = localrepo.makelocalrepository(ui, destrootpath)
916 921
917 922 destwlock = destrepo.wlock()
918 923 destlock = destrepo.lock()
919 924 from . import streamclone # avoid cycle
920 925
921 926 streamclone.local_copy(srcrepo, destrepo)
922 927
923 928 # we need to re-init the repo after manually copying the data
924 929 # into it
925 930 destpeer = peer(srcrepo, peeropts, dest)
926 931
927 932 # make the peer aware that is it already locked
928 933 #
929 934 # important:
930 935 #
931 936 # We still need to release that lock at the end of the function
932 937 destpeer.local()._lockref = weakref.ref(destlock)
933 938 destpeer.local()._wlockref = weakref.ref(destwlock)
934 939 # dirstate also needs to be copied because `_wlockref` has a reference
935 940 # to it: this dirstate is saved to disk when the wlock is released
936 941 destpeer.local().dirstate = destrepo.dirstate
937 942
938 943 srcrepo.hook(
939 944 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
940 945 )
941 946 else:
942 947 try:
943 948 # only pass ui when no srcrepo
944 949 destpeer = peer(
945 950 srcrepo or ui,
946 951 peeropts,
947 952 dest,
948 953 create=True,
949 954 createopts=createopts,
950 955 )
951 956 except FileExistsError:
952 957 cleandir = None
953 958 raise error.Abort(_(b"destination '%s' already exists") % dest)
954 959
955 960 if revs:
956 961 if not srcpeer.capable(b'lookup'):
957 962 raise error.Abort(
958 963 _(
959 964 b"src repository does not support "
960 965 b"revision lookup and so doesn't "
961 966 b"support clone by revision"
962 967 )
963 968 )
964 969
965 970 # TODO this is batchable.
966 971 remoterevs = []
967 972 for rev in revs:
968 973 with srcpeer.commandexecutor() as e:
969 974 remoterevs.append(
970 975 e.callcommand(
971 976 b'lookup',
972 977 {
973 978 b'key': rev,
974 979 },
975 980 ).result()
976 981 )
977 982 revs = remoterevs
978 983
979 984 checkout = revs[0]
980 985 else:
981 986 revs = None
982 987 local = destpeer.local()
983 988 if local:
984 989 if narrow:
985 990 with local.wlock(), local.lock():
986 991 local.setnarrowpats(storeincludepats, storeexcludepats)
987 992 narrowspec.copytoworkingcopy(local)
988 993
989 994 u = urlutil.url(abspath)
990 995 defaulturl = bytes(u)
991 996 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
992 997 if not stream:
993 998 if pull:
994 999 stream = False
995 1000 else:
996 1001 stream = None
997 1002 # internal config: ui.quietbookmarkmove
998 1003 overrides = {(b'ui', b'quietbookmarkmove'): True}
999 1004 with local.ui.configoverride(overrides, b'clone'):
1000 1005 exchange.pull(
1001 1006 local,
1002 1007 srcpeer,
1003 1008 heads=revs,
1004 1009 streamclonerequested=stream,
1005 1010 includepats=storeincludepats,
1006 1011 excludepats=storeexcludepats,
1007 1012 depth=depth,
1008 1013 )
1009 1014 elif srcrepo:
1010 1015 # TODO lift restriction once exchange.push() accepts narrow
1011 1016 # push.
1012 1017 if narrow:
1013 1018 raise error.Abort(
1014 1019 _(
1015 1020 b'narrow clone not available for '
1016 1021 b'remote destinations'
1017 1022 )
1018 1023 )
1019 1024
1020 1025 exchange.push(
1021 1026 srcrepo,
1022 1027 destpeer,
1023 1028 revs=revs,
1024 1029 bookmarks=srcrepo._bookmarks.keys(),
1025 1030 )
1026 1031 else:
1027 1032 raise error.Abort(
1028 1033 _(b"clone from remote to remote not supported")
1029 1034 )
1030 1035
1031 1036 cleandir = None
1032 1037
1033 1038 destrepo = destpeer.local()
1034 1039 if destrepo:
1035 1040 template = uimod.samplehgrcs[b'cloned']
1036 1041 u = urlutil.url(abspath)
1037 1042 u.passwd = None
1038 1043 defaulturl = bytes(u)
1039 1044 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1040 1045 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1041 1046
1042 1047 if ui.configbool(b'experimental', b'remotenames'):
1043 1048 logexchange.pullremotenames(destrepo, srcpeer)
1044 1049
1045 1050 if update:
1046 1051 if update is not True:
1047 1052 with srcpeer.commandexecutor() as e:
1048 1053 checkout = e.callcommand(
1049 1054 b'lookup',
1050 1055 {
1051 1056 b'key': update,
1052 1057 },
1053 1058 ).result()
1054 1059
1055 1060 uprev = None
1056 1061 status = None
1057 1062 if checkout is not None:
1058 1063 # Some extensions (at least hg-git and hg-subversion) have
1059 1064 # a peer.lookup() implementation that returns a name instead
1060 1065 # of a nodeid. We work around it here until we've figured
1061 1066 # out a better solution.
1062 1067 if len(checkout) == 20 and checkout in destrepo:
1063 1068 uprev = checkout
1064 1069 elif scmutil.isrevsymbol(destrepo, checkout):
1065 1070 uprev = scmutil.revsymbol(destrepo, checkout).node()
1066 1071 else:
1067 1072 if update is not True:
1068 1073 try:
1069 1074 uprev = destrepo.lookup(update)
1070 1075 except error.RepoLookupError:
1071 1076 pass
1072 1077 if uprev is None:
1073 1078 try:
1074 1079 if destrepo._activebookmark:
1075 1080 uprev = destrepo.lookup(destrepo._activebookmark)
1076 1081 update = destrepo._activebookmark
1077 1082 else:
1078 1083 uprev = destrepo._bookmarks[b'@']
1079 1084 update = b'@'
1080 1085 bn = destrepo[uprev].branch()
1081 1086 if bn == b'default':
1082 1087 status = _(b"updating to bookmark %s\n" % update)
1083 1088 else:
1084 1089 status = (
1085 1090 _(b"updating to bookmark %s on branch %s\n")
1086 1091 ) % (update, bn)
1087 1092 except KeyError:
1088 1093 try:
1089 1094 uprev = destrepo.branchtip(b'default')
1090 1095 except error.RepoLookupError:
1091 1096 uprev = destrepo.lookup(b'tip')
1092 1097 if not status:
1093 1098 bn = destrepo[uprev].branch()
1094 1099 status = _(b"updating to branch %s\n") % bn
1095 1100 destrepo.ui.status(status)
1096 1101 _update(destrepo, uprev)
1097 1102 if update in destrepo._bookmarks:
1098 1103 bookmarks.activate(destrepo, update)
1099 1104 if destlock is not None:
1100 1105 release(destlock)
1101 1106 if destwlock is not None:
1102 1107 release(destlock)
1103 1108 # here is a tiny windows were someone could end up writing the
1104 1109 # repository before the cache are sure to be warm. This is "fine"
1105 1110 # as the only "bad" outcome would be some slowness. That potential
1106 1111 # slowness already affect reader.
1107 1112 with destrepo.lock():
1108 1113 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1109 1114 finally:
1110 1115 release(srclock, destlock, destwlock)
1111 1116 if cleandir is not None:
1112 1117 shutil.rmtree(cleandir, True)
1113 1118 if srcpeer is not None:
1114 1119 srcpeer.close()
1115 1120 if destpeer and destpeer.local() is None:
1116 1121 destpeer.close()
1117 1122 return srcpeer, destpeer
1118 1123
1119 1124
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge ``stats`` via ``repo.ui``.

    When ``quietempty`` is true and the stats are empty, print nothing.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1135 1140
1136 1141
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to ``node`` (deprecated).

    When ``overwrite`` is set, local changes are clobbered; otherwise
    they are merged into the destination.

    Returns stats (see pydoc mercurial.merge.applyupdates).
    """
    # kept only for backward compatibility; new code should call the
    # merge module directly
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        updatecheck=updatecheck,
        labels=[b'working copy', b'destination'],
    )
1155 1160
1156 1161
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node``; return True on conflicts."""
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0


# naming conflict in clone()
_update = update
1168 1173
1169 1174
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``.

    Local changes are clobbered. Always returns False: a clean update
    cannot leave unresolved files behind.
    """
    stats = mergemod.clean_update(repo[node])
    # clobbering update never produces merge conflicts
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False


# naming conflict in updatetotally()
_clean = clean
1181 1186
# Update-check strategies accepted by updatetotally(); an unknown configured
# value falls back to UPDATECHECK_LINEAR there.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1188 1193
1189 1194
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit caller-supplied value must be valid
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one; it may also
            # rebind brev (e.g. to the active bookmark)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # ABORT is implemented as an upfront dirtiness check
                # followed by an unchecked update
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            # conflict-free update: try to advance the active bookmark
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination named an existing bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is some other name: leave any active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1268 1273
1269 1274
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Merge the working directory with ``ctx``, resolving changes.

    Returns True when any unresolved conflicts remain.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved
1291 1296
1292 1297
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the pre-merge parent."""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # the merge left conflicts behind; return to its local side
        node = ms.localctx.hex()
    else:
        # there were no conflicts, so no mergestate was stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot itself produce conflicts
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1306 1311
1307 1312
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed, otherwise
    the value of subreporecurse().
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            # absolute subpath replaces the configured source entirely
            peer_path = url = bytes(subpath)
        else:
            # relative subpath is joined onto the source location
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may replace `other` and returns its own cleanup
        # callable, which supersedes the plain close() bound above
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        # always release the peer / temporary bundle, even on early return
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1374 1379
1375 1380
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``.

    Returns 0 when incoming changes were found, otherwise the subrepo
    recursion result (1 when nothing was found anywhere).
    """

    def subreporecurse():
        # recurse into subrepos when --subrepos was given; 1 = nothing found
        result = 1
        if not opts.get(b'subrepos'):
            return result
        wctx = repo[None]
        for spath in sorted(wctx.substate):
            result = min(result, wctx.sub(spath).incoming(ui, source, opts))
        return result

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skipmerges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1405 1410
1406 1411
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute changesets missing from each destination in ``dests``.

    Returns a pair ``(outgoing_revs, others)`` where ``outgoing_revs`` is
    the union of missing nodes across all destinations sorted by local
    revision number, and ``others`` are the still-open peer objects (the
    caller is responsible for closing them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                # absolute subpath replaces the configured destination
                dest = bytes(subpath)
            else:
                # relative subpath is joined onto the destination location
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # keep the peer open for the caller; only close on failure
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1449 1454
1450 1455
1451 1456 def _outgoing_recurse(ui, repo, dests, opts):
1452 1457 ret = 1
1453 1458 if opts.get(b'subrepos'):
1454 1459 ctx = repo[None]
1455 1460 for subpath in sorted(ctx.substate):
1456 1461 sub = ctx.sub(subpath)
1457 1462 ret = min(ret, sub.outgoing(ui, dests, opts))
1458 1463 return ret
1459 1464
1460 1465
def _outgoing_filter(repo, revs, opts):
    """Yield nodes from ``revs`` after applying outgoing display options.

    Honors --newest-first (reverses ``revs`` in place), --limit and
    --no-merges.
    """
    limit = logcmdutil.getlimit(opts)
    skipmerges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not skipmerges:
        # fast path: nothing to filter, stream everything through
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if skipmerges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1482 1487
1483 1488
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the destination(s).

    Returns 0 when outgoing changes were found, 1 otherwise (combined
    with the subrepo recursion result).
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                # graph output uses its own display pipeline
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # hooks run even when nothing is outgoing
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # _outgoing left the peers open for us; always close them
        for oth in others:
            oth.close()
1515 1520
1516 1521
def verify(repo, level=None):
    """verify the consistency of a repository

    Runs the core verifier, then additionally verifies any subrepos
    referenced from visible revisions. Returns a truthy value when
    problems were found.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # any subrepo failure makes the overall result bad
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading .hgsubstate itself blew up: report, keep going
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1549 1554
1550 1555
def remoteui(src, opts):
    """Build a ui suitable for remote interaction from a repo or ui.

    ``src`` may be a repository (its repo-specific configuration is
    dropped) or a plain ui object (copied wholesale). Selected ssh,
    bundle and security settings are carried over onto the copy.
    """
    if util.safehasattr(src, b'baseui'):
        # a repository: start from its base ui so repo-local config
        # does not leak to the remote side
        dst = src.baseui.copy()
        src = src.ui
    else:
        # a plain ui object: keep all global options
        dst = src.copy()

    # ssh options may come from the command line or from config
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # selected local security/proxy settings must follow to the remote ui
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1579 1584
1580 1585
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (repo attribute holding the base path, filename);
# see cachedlocalrepo._repostate for how these are stat()ed.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1590 1595
1591 1596
class cachedlocalrepo:
    """Wrap a localrepository so it can be cached and transparently reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # Staleness is detected by comparing mtimes and sizes of a few
        # well-known files. This is imprecise: mtimes suffer from clock
        # skew and coarse filesystems, and content can change while
        # keeping the same size.
        newstate, newmtime = self._repostate()
        if newstate == self._state:
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = newstate
        self.mtime = newmtime

        return self._repo, True

    def _repostate(self):
        """Return a ``(state, maxmtime)`` snapshot of the files of interest."""
        snapshot = []
        newest = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            target = os.path.join(prefix, fname)
            try:
                st = os.stat(target)
            except OSError:
                # fall back to the containing directory when the file
                # does not exist (yet)
                st = os.stat(prefix)
            snapshot.append((st[stat.ST_MTIME], st.st_size))
            newest = max(newest, st[stat.ST_MTIME])

        return tuple(snapshot), newest

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        fresh = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            fresh = fresh.filtered(self._filtername)
        else:
            fresh = fresh.unfiltered()
        dup = cachedlocalrepo(fresh)
        dup._state = self._state
        dup.mtime = self.mtime
        return dup
General Comments 0
You need to be logged in to leave comments. Login now