##// END OF EJS Templates
path: have `peer` constructor accept a `path` object...
marmoute -
r50602:aa36771e default
parent child Browse files
Show More
@@ -1,1636 +1,1642 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import os
11 11 import posixpath
12 12 import shutil
13 13 import stat
14 14 import weakref
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 sha1nodeconstants,
20 20 short,
21 21 )
22 22 from .pycompat import getattr
23 23
24 24 from . import (
25 25 bookmarks,
26 26 bundlerepo,
27 27 cmdutil,
28 28 destutil,
29 29 discovery,
30 30 error,
31 31 exchange,
32 32 extensions,
33 33 graphmod,
34 34 httppeer,
35 35 localrepo,
36 36 lock,
37 37 logcmdutil,
38 38 logexchange,
39 39 merge as mergemod,
40 40 mergestate as mergestatemod,
41 41 narrowspec,
42 42 phases,
43 43 requirements,
44 44 scmutil,
45 45 sshpeer,
46 46 statichttprepo,
47 47 ui as uimod,
48 48 unionrepo,
49 49 url,
50 50 util,
51 51 verify as verifymod,
52 52 vfs as vfsmod,
53 53 )
54 54 from .interfaces import repository as repositorymod
55 55 from .utils import (
56 56 hashutil,
57 57 stringutil,
58 58 urlutil,
59 59 )
60 60
61 61
62 62 release = lock.release
63 63
64 64 # shared features
65 65 sharedbookmarks = b'bookmarks'
66 66
67 67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names into revisions against a peer.

    ``branches`` is a ``(hashbranch, branches)`` pair (the URL fragment,
    if any, plus explicitly requested branch names — see the
    ``urlutil.parseurl`` call sites in this module).  Returns
    ``(revs, checkout)`` where ``revs`` is the augmented revision list
    and ``checkout`` is the revision to update to (may be None).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch names to resolve: pass the requested revs through
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # old server without branchmap: we cannot map branch names to
        # heads remotely, so only the URL fragment can be used (it is
        # handed over as a symbolic revision)
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # append all heads of ``branch`` (most recent first) to ``revs``;
        # return False when the branch is unknown to the remote
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # the fragment may be a branch name or any other symbolic revision
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
110 110
111 111
def _isfile(path):
    """Return True if ``path`` names a regular file on disk.

    Paths that cannot be stat'ed yield False, while a path the OS layer
    rejects outright (ValueError) is surfaced to the user as an Abort.
    """
    # os.stat() is used on purpose instead of os.path.isfile(): since
    # Python 3.8 the latter silently returns False on invalid-path
    # exceptions, and we want to report those explicitly.
    try:
        details = os.stat(path)
    except OSError:
        return False
    except ValueError as exc:
        reason = stringutil.forcebytestr(exc)
        raise error.Abort(_(b'invalid path %s: %s') % (path, reason))
    return stat.S_ISREG(details.st_mode)
126 126
127 127
class LocalFactory:
    """Thin dispatcher picking localrepo or bundlerepo for a local path."""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a local path is "local" unless it points at a bundle file
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        # bundle files get a bundlerepo, everything else a localrepo
        expanded = util.expandpath(urlutil.urllocalpath(path))
        factory = bundlerepo if _isfile(expanded) else localrepo
        return factory.instance(ui, expanded, *args, **kwargs)
144 144
145 145
# URL scheme -> module providing ``instance()`` for opening a *repository*
# object (local-only access).
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL scheme -> module providing ``instance()`` for opening a *peer*
# (wire-protocol capable) object.
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
158 158
159 159
def _peerlookup(path):
    """Return the module/class handling the URL scheme of ``path``.

    Peer schemes take precedence over repository schemes; any unknown
    scheme falls back to ``LocalFactory``.
    """
    scheme = urlutil.url(path).scheme or b'file'
    for table in (peer_schemes, repo_schemes):
        handler = table.get(scheme)
        if handler is not None:
            return handler
    return LocalFactory
168 168
169 169
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # passing a repo/peer object here is deprecated; callers should
        # use obj.local() directly
        repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
        return repo.local()
    handler = _peerlookup(repo)
    handler.instance  # touch the attribute to make sure the module loads
    if util.safehasattr(handler, 'islocal'):
        return handler.islocal(repo)  # pytype: disable=module-attr
    return False
180 180
181 181
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    parsed = urlutil.url(path, parsequery=False, parsefragment=False)
    if not parsed.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(parsed.localpath(), b'rb')
189 189
190 190
# a list of functions called as ``f(ui, peer)`` right after a wire peer is
# set up; extensions append here to hook wire peer initialization (see
# _setup_repo_or_peer below)
wirepeersetupfuncs = []
193 193
194 194
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run extension setup hooks on a freshly created repo or peer.

    ``presetupfuncs`` are caller-supplied ``f(ui, obj)`` callbacks run
    before the extensions' ``reposetup`` hooks.  For non-local objects
    (wire peers), the registered ``wirepeersetupfuncs`` run afterwards.
    """
    # prefer the object's own ui, which may carry repo-level configuration
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
213 213
214 214
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    factory = repo_schemes.get(scheme)
    if factory is None:
        # a peer-only scheme cannot yield a local repository object
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        factory = LocalFactory
    repo = factory.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    # hand back the "visible" filtered view, hiding e.g. hidden changesets
    return repo.filtered(b'visible')
241 241
242 242
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path

    ``path`` may be either a bytes URL/filesystem path or a
    ``urlutil.path`` object; the latter is unwrapped to its ``.loc``
    before being handed to the scheme-specific factory.
    '''
    rui = remoteui(uiorrepo, opts)
    # NOTE: the unconditional ``scheme = urlutil.url(path).scheme`` that
    # used to live here must not run before the type check below: calling
    # urlutil.url() on a urlutil.path object would fail, and it made the
    # branch below dead code.
    if util.safehasattr(path, 'url'):
        # this is a urlutil.path object
        scheme = path.url.scheme  # pytype: disable=attribute-error
        # XXX for now we don't do anything more than that
        path = path.loc  # pytype: disable=attribute-error
    else:
        scheme = urlutil.url(path).scheme
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
        peer = cls.instance(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo = repository(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        peer = repo.peer()
    return peer
268 274
269 275
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    # the last path component of the URL names the clone directory;
    # no path component means no sensible default
    urlpath = urlutil.url(source).path
    return os.path.basename(os.path.normpath(urlpath)) if urlpath else b''
290 296
291 297
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.

    The resolved source is cached on ``repo.srcrepo`` so repeated calls
    do not re-open the repository.
    """
    if repo.sharedpath == repo.path:
        # not a share: sharedpath points at the repo's own store
        return None

    # The attribute name must be a str: on Python 3 getattr() rejects
    # bytes names, so the previous ``b'srcrepo'`` spelling meant the
    # cache check could never succeed (cf. safehasattr(source, 'local')
    # elsewhere in this module).
    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
309 315
310 316
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a repo/peer object with a local counterpart or a
    bytes path/URL to a local repository.  A new repository sharing the
    source's store is created at ``dest`` and returned.
    '''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        # repo/peer object: it must have a local counterpart
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open the destination so it picks up the configuration written by
    # postshare() above
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
362 368
363 369
def _prependsourcehgrc(repo):
    """Prepend the share source's .hg/hgrc to this repo's .hg/hgrc.

    Done on unshare, and only when the share was performed with the
    share-safe method (where the source's config is shared): the source
    settings are kept, with any pre-existing local content appended so
    local overrides still win.
    """
    sharevfs = vfsmod.vfs(repo.sharedpath)
    localvfs = vfsmod.vfs(repo.path)

    if not sharevfs.exists(b'hgrc'):
        # nothing to copy from the source
        return

    existing = localvfs.read(b'hgrc') if localvfs.exists(b'hgrc') else b''

    with localvfs(b'hgrc', b'wb') as fp:
        copied = sharevfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(copied)
        fp.write(b'\n')
        fp.write(existing)
384 390
385 391
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        # copystore may return None when no store lock was taken; keep the
        # with-statement valid either way
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old share pointer around (renamed) instead of
            # deleting it outright
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make sure nothing keeps using the stale (shared) repo object
    localrepo.poisonrepository(repo)

    return newrepo
432 438
433 439
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    pathdefault = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if pathdefault:
        hgrc = util.tonativeeol(b'[paths]\ndefault = %s\n' % pathdefault)
        destrepo.vfs.write(b'hgrc', hgrc)
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow patterns also need to be copied into the new working copy
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
450 456
451 457
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # an explicit revision was requested; it takes precedence
        checkout = update
    candidates = (checkout, b'default', b'tip')
    for candidate in candidates:
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
472 478
473 479
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # tri-state: None lets util.copyfiles decide whether hardlinking
        # works and report the outcome back via its return value
        hardlink = None
        # NOTE(review): hardlink is still None at this point, so the
        # progress topic always reads b'copying' even if files end up
        # hardlinked — the distinction only shows in the debug output below
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # presumably skipped because a publishing repo's
                    # phases are implicit — TODO confirm
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # never leak the destination lock on failure
        release(destlock)
        raise
512 518
513 519
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # resolve each requested rev to a node on the remote
        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    # return the same (srcpeer, destpeer) pair shape as clone()
    return srcpeer, peer(ui, peeropts, dest)
619 625
620 626
621 627 # Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    cachesrc = srcrepo.cachevfs.join(fname)
    if not os.path.exists(cachesrc):
        # no cache to copy; recomputing later is the fallback
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(cachesrc, os.path.join(dstcachedir, fname))
630 636
631 637
632 638 def clone(
633 639 ui,
634 640 peeropts,
635 641 source,
636 642 dest=None,
637 643 pull=False,
638 644 revs=None,
639 645 update=True,
640 646 stream=False,
641 647 branch=None,
642 648 shareopts=None,
643 649 storeincludepats=None,
644 650 storeexcludepats=None,
645 651 depth=None,
646 652 ):
647 653 """Make a copy of an existing repository.
648 654
649 655 Create a copy of an existing repository in a new directory. The
650 656 source and destination are URLs, as passed to the repository
651 657 function. Returns a pair of repository peers, the source and
652 658 newly created destination.
653 659
654 660 The location of the source is added to the new repository's
655 661 .hg/hgrc file, as the default to be used for future pulls and
656 662 pushes.
657 663
658 664 If an exception is raised, the partly cloned/updated destination
659 665 repository will be deleted.
660 666
661 667 Arguments:
662 668
663 669 source: repository object or URL
664 670
665 671 dest: URL of destination repository to create (defaults to base
666 672 name of source repository)
667 673
668 674 pull: always pull from source repository, even in local case or if the
669 675 server prefers streaming
670 676
671 677 stream: stream raw data uncompressed from repository (fast over
672 678 LAN, slow over WAN)
673 679
674 680 revs: revision to clone up to (implies pull=True)
675 681
676 682 update: update working directory after clone completes, if
677 683 destination is local repository (True means update to default rev,
678 684 anything else is treated as a revision)
679 685
680 686 branch: branches to clone
681 687
682 688 shareopts: dict of options to control auto sharing behavior. The "pool" key
683 689 activates auto sharing mode and defines the directory for stores. The
684 690 "mode" key determines how to construct the directory name of the shared
685 691 repository. "identity" means the name is derived from the node of the first
686 692 changeset in the repository. "remote" means the name is derived from the
687 693 remote's path/URL. Defaults to "identity."
688 694
689 695 storeincludepats and storeexcludepats: sets of file patterns to include and
690 696 exclude in the repository copy, respectively. If not defined, all files
691 697 will be included (a "full" clone). Otherwise a "narrow" clone containing
692 698 only the requested files will be performed. If ``storeincludepats`` is not
693 699 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
694 700 ``path:.``. If both are empty sets, no files will be cloned.
695 701 """
696 702
697 703 if isinstance(source, bytes):
698 704 src = urlutil.get_clone_path(ui, source, branch)
699 705 origsource, source, branches = src
700 706 srcpeer = peer(ui, peeropts, source)
701 707 else:
702 708 srcpeer = source.peer() # in case we were called with a localrepo
703 709 branches = (None, branch or [])
704 710 origsource = source = srcpeer.url()
705 711 srclock = destlock = destwlock = cleandir = None
706 712 destpeer = None
707 713 try:
708 714 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
709 715
710 716 if dest is None:
711 717 dest = defaultdest(source)
712 718 if dest:
713 719 ui.status(_(b"destination directory: %s\n") % dest)
714 720 else:
715 721 dest = urlutil.get_clone_path(ui, dest)[0]
716 722
717 723 dest = urlutil.urllocalpath(dest)
718 724 source = urlutil.urllocalpath(source)
719 725
720 726 if not dest:
721 727 raise error.InputError(_(b"empty destination path is not valid"))
722 728
723 729 destvfs = vfsmod.vfs(dest, expandpath=True)
724 730 if destvfs.lexists():
725 731 if not destvfs.isdir():
726 732 raise error.InputError(
727 733 _(b"destination '%s' already exists") % dest
728 734 )
729 735 elif destvfs.listdir():
730 736 raise error.InputError(
731 737 _(b"destination '%s' is not empty") % dest
732 738 )
733 739
734 740 createopts = {}
735 741 narrow = False
736 742
737 743 if storeincludepats is not None:
738 744 narrowspec.validatepatterns(storeincludepats)
739 745 narrow = True
740 746
741 747 if storeexcludepats is not None:
742 748 narrowspec.validatepatterns(storeexcludepats)
743 749 narrow = True
744 750
745 751 if narrow:
746 752 # Include everything by default if only exclusion patterns defined.
747 753 if storeexcludepats and not storeincludepats:
748 754 storeincludepats = {b'path:.'}
749 755
750 756 createopts[b'narrowfiles'] = True
751 757
752 758 if depth:
753 759 createopts[b'shallowfilestore'] = True
754 760
755 761 if srcpeer.capable(b'lfs-serve'):
756 762 # Repository creation honors the config if it disabled the extension, so
757 763 # we can't just announce that lfs will be enabled. This check avoids
758 764 # saying that lfs will be enabled, and then saying it's an unknown
759 765 # feature. The lfs creation option is set in either case so that a
760 766 # requirement is added. If the extension is explicitly disabled but the
761 767 # requirement is set, the clone aborts early, before transferring any
762 768 # data.
763 769 createopts[b'lfs'] = True
764 770
765 771 if extensions.disabled_help(b'lfs'):
766 772 ui.status(
767 773 _(
768 774 b'(remote is using large file support (lfs), but it is '
769 775 b'explicitly disabled in the local configuration)\n'
770 776 )
771 777 )
772 778 else:
773 779 ui.status(
774 780 _(
775 781 b'(remote is using large file support (lfs); lfs will '
776 782 b'be enabled for this repository)\n'
777 783 )
778 784 )
779 785
780 786 shareopts = shareopts or {}
781 787 sharepool = shareopts.get(b'pool')
782 788 sharenamemode = shareopts.get(b'mode')
783 789 if sharepool and islocal(dest):
784 790 sharepath = None
785 791 if sharenamemode == b'identity':
786 792 # Resolve the name from the initial changeset in the remote
787 793 # repository. This returns nullid when the remote is empty. It
788 794 # raises RepoLookupError if revision 0 is filtered or otherwise
789 795 # not available. If we fail to resolve, sharing is not enabled.
790 796 try:
791 797 with srcpeer.commandexecutor() as e:
792 798 rootnode = e.callcommand(
793 799 b'lookup',
794 800 {
795 801 b'key': b'0',
796 802 },
797 803 ).result()
798 804
799 805 if rootnode != sha1nodeconstants.nullid:
800 806 sharepath = os.path.join(sharepool, hex(rootnode))
801 807 else:
802 808 ui.status(
803 809 _(
804 810 b'(not using pooled storage: '
805 811 b'remote appears to be empty)\n'
806 812 )
807 813 )
808 814 except error.RepoLookupError:
809 815 ui.status(
810 816 _(
811 817 b'(not using pooled storage: '
812 818 b'unable to resolve identity of remote)\n'
813 819 )
814 820 )
815 821 elif sharenamemode == b'remote':
816 822 sharepath = os.path.join(
817 823 sharepool, hex(hashutil.sha1(source).digest())
818 824 )
819 825 else:
820 826 raise error.Abort(
821 827 _(b'unknown share naming mode: %s') % sharenamemode
822 828 )
823 829
824 830 # TODO this is a somewhat arbitrary restriction.
825 831 if narrow:
826 832 ui.status(
827 833 _(b'(pooled storage not supported for narrow clones)\n')
828 834 )
829 835 sharepath = None
830 836
831 837 if sharepath:
832 838 return clonewithshare(
833 839 ui,
834 840 peeropts,
835 841 sharepath,
836 842 source,
837 843 srcpeer,
838 844 dest,
839 845 pull=pull,
840 846 rev=revs,
841 847 update=update,
842 848 stream=stream,
843 849 )
844 850
845 851 srcrepo = srcpeer.local()
846 852
847 853 abspath = origsource
848 854 if islocal(origsource):
849 855 abspath = util.abspath(urlutil.urllocalpath(origsource))
850 856
851 857 if islocal(dest):
852 858 if os.path.exists(dest):
853 859 # only clean up directories we create ourselves
854 860 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
855 861 cleandir = hgdir
856 862 else:
857 863 cleandir = dest
858 864
859 865 copy = False
860 866 if (
861 867 srcrepo
862 868 and srcrepo.cancopy()
863 869 and islocal(dest)
864 870 and not phases.hassecret(srcrepo)
865 871 ):
866 872 copy = not pull and not revs
867 873
868 874 # TODO this is a somewhat arbitrary restriction.
869 875 if narrow:
870 876 copy = False
871 877
872 878 if copy:
873 879 try:
874 880 # we use a lock here because if we race with commit, we
875 881 # can end up with extra data in the cloned revlogs that's
876 882 # not pointed to by changesets, thus causing verify to
877 883 # fail
878 884 srclock = srcrepo.lock(wait=False)
879 885 except error.LockError:
880 886 copy = False
881 887
882 888 if copy:
883 889 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
884 890
885 891 destrootpath = urlutil.urllocalpath(dest)
886 892 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
887 893 localrepo.createrepository(
888 894 ui,
889 895 destrootpath,
890 896 requirements=dest_reqs,
891 897 )
892 898 destrepo = localrepo.makelocalrepository(ui, destrootpath)
893 899
894 900 destwlock = destrepo.wlock()
895 901 destlock = destrepo.lock()
896 902 from . import streamclone # avoid cycle
897 903
898 904 streamclone.local_copy(srcrepo, destrepo)
899 905
900 906 # we need to re-init the repo after manually copying the data
901 907 # into it
902 908 destpeer = peer(srcrepo, peeropts, dest)
903 909
904 910 # make the peer aware that is it already locked
905 911 #
906 912 # important:
907 913 #
908 914 # We still need to release that lock at the end of the function
909 915 destpeer.local()._lockref = weakref.ref(destlock)
910 916 destpeer.local()._wlockref = weakref.ref(destwlock)
911 917 # dirstate also needs to be copied because `_wlockref` has a reference
912 918 # to it: this dirstate is saved to disk when the wlock is released
913 919 destpeer.local().dirstate = destrepo.dirstate
914 920
915 921 srcrepo.hook(
916 922 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
917 923 )
918 924 else:
919 925 try:
920 926 # only pass ui when no srcrepo
921 927 destpeer = peer(
922 928 srcrepo or ui,
923 929 peeropts,
924 930 dest,
925 931 create=True,
926 932 createopts=createopts,
927 933 )
928 934 except FileExistsError:
929 935 cleandir = None
930 936 raise error.Abort(_(b"destination '%s' already exists") % dest)
931 937
932 938 if revs:
933 939 if not srcpeer.capable(b'lookup'):
934 940 raise error.Abort(
935 941 _(
936 942 b"src repository does not support "
937 943 b"revision lookup and so doesn't "
938 944 b"support clone by revision"
939 945 )
940 946 )
941 947
942 948 # TODO this is batchable.
943 949 remoterevs = []
944 950 for rev in revs:
945 951 with srcpeer.commandexecutor() as e:
946 952 remoterevs.append(
947 953 e.callcommand(
948 954 b'lookup',
949 955 {
950 956 b'key': rev,
951 957 },
952 958 ).result()
953 959 )
954 960 revs = remoterevs
955 961
956 962 checkout = revs[0]
957 963 else:
958 964 revs = None
959 965 local = destpeer.local()
960 966 if local:
961 967 if narrow:
962 968 with local.wlock(), local.lock():
963 969 local.setnarrowpats(storeincludepats, storeexcludepats)
964 970 narrowspec.copytoworkingcopy(local)
965 971
966 972 u = urlutil.url(abspath)
967 973 defaulturl = bytes(u)
968 974 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
969 975 if not stream:
970 976 if pull:
971 977 stream = False
972 978 else:
973 979 stream = None
974 980 # internal config: ui.quietbookmarkmove
975 981 overrides = {(b'ui', b'quietbookmarkmove'): True}
976 982 with local.ui.configoverride(overrides, b'clone'):
977 983 exchange.pull(
978 984 local,
979 985 srcpeer,
980 986 heads=revs,
981 987 streamclonerequested=stream,
982 988 includepats=storeincludepats,
983 989 excludepats=storeexcludepats,
984 990 depth=depth,
985 991 )
986 992 elif srcrepo:
987 993 # TODO lift restriction once exchange.push() accepts narrow
988 994 # push.
989 995 if narrow:
990 996 raise error.Abort(
991 997 _(
992 998 b'narrow clone not available for '
993 999 b'remote destinations'
994 1000 )
995 1001 )
996 1002
997 1003 exchange.push(
998 1004 srcrepo,
999 1005 destpeer,
1000 1006 revs=revs,
1001 1007 bookmarks=srcrepo._bookmarks.keys(),
1002 1008 )
1003 1009 else:
1004 1010 raise error.Abort(
1005 1011 _(b"clone from remote to remote not supported")
1006 1012 )
1007 1013
1008 1014 cleandir = None
1009 1015
1010 1016 destrepo = destpeer.local()
1011 1017 if destrepo:
1012 1018 template = uimod.samplehgrcs[b'cloned']
1013 1019 u = urlutil.url(abspath)
1014 1020 u.passwd = None
1015 1021 defaulturl = bytes(u)
1016 1022 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1017 1023 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1018 1024
1019 1025 if ui.configbool(b'experimental', b'remotenames'):
1020 1026 logexchange.pullremotenames(destrepo, srcpeer)
1021 1027
1022 1028 if update:
1023 1029 if update is not True:
1024 1030 with srcpeer.commandexecutor() as e:
1025 1031 checkout = e.callcommand(
1026 1032 b'lookup',
1027 1033 {
1028 1034 b'key': update,
1029 1035 },
1030 1036 ).result()
1031 1037
1032 1038 uprev = None
1033 1039 status = None
1034 1040 if checkout is not None:
1035 1041 # Some extensions (at least hg-git and hg-subversion) have
1036 1042 # a peer.lookup() implementation that returns a name instead
1037 1043 # of a nodeid. We work around it here until we've figured
1038 1044 # out a better solution.
1039 1045 if len(checkout) == 20 and checkout in destrepo:
1040 1046 uprev = checkout
1041 1047 elif scmutil.isrevsymbol(destrepo, checkout):
1042 1048 uprev = scmutil.revsymbol(destrepo, checkout).node()
1043 1049 else:
1044 1050 if update is not True:
1045 1051 try:
1046 1052 uprev = destrepo.lookup(update)
1047 1053 except error.RepoLookupError:
1048 1054 pass
1049 1055 if uprev is None:
1050 1056 try:
1051 1057 if destrepo._activebookmark:
1052 1058 uprev = destrepo.lookup(destrepo._activebookmark)
1053 1059 update = destrepo._activebookmark
1054 1060 else:
1055 1061 uprev = destrepo._bookmarks[b'@']
1056 1062 update = b'@'
1057 1063 bn = destrepo[uprev].branch()
1058 1064 if bn == b'default':
1059 1065 status = _(b"updating to bookmark %s\n" % update)
1060 1066 else:
1061 1067 status = (
1062 1068 _(b"updating to bookmark %s on branch %s\n")
1063 1069 ) % (update, bn)
1064 1070 except KeyError:
1065 1071 try:
1066 1072 uprev = destrepo.branchtip(b'default')
1067 1073 except error.RepoLookupError:
1068 1074 uprev = destrepo.lookup(b'tip')
1069 1075 if not status:
1070 1076 bn = destrepo[uprev].branch()
1071 1077 status = _(b"updating to branch %s\n") % bn
1072 1078 destrepo.ui.status(status)
1073 1079 _update(destrepo, uprev)
1074 1080 if update in destrepo._bookmarks:
1075 1081 bookmarks.activate(destrepo, update)
1076 1082 if destlock is not None:
1077 1083 release(destlock)
1078 1084 if destwlock is not None:
1079 1085 release(destlock)
1080 1086 # here is a tiny windows were someone could end up writing the
1081 1087 # repository before the cache are sure to be warm. This is "fine"
1082 1088 # as the only "bad" outcome would be some slowness. That potential
1083 1089 # slowness already affect reader.
1084 1090 with destrepo.lock():
1085 1091 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1086 1092 finally:
1087 1093 release(srclock, destlock, destwlock)
1088 1094 if cleandir is not None:
1089 1095 shutil.rmtree(cleandir, True)
1090 1096 if srcpeer is not None:
1091 1097 srcpeer.close()
1092 1098 if destpeer and destpeer.local() is None:
1093 1099 destpeer.close()
1094 1100 return srcpeer, destpeer
1095 1101
1096 1102
def _showstats(repo, stats, quietempty=False):
    """Print the file-update statistics in *stats* through ``repo.ui``.

    When ``quietempty`` is true and the stats are all zero, print
    nothing at all.
    """
    if quietempty and stats.isempty():
        return
    template = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    repo.ui.status(template % counts)
1112 1118
1113 1119
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Deprecated: update the working directory of *repo* to *node*.

    When ``overwrite`` is set, local changes are clobbered instead of
    merged.

    Returns stats (see pydoc mercurial.merge.applyupdates).
    """
    # steer callers toward the merge module's entry points
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        updatecheck=updatecheck,
        labels=labels,
    )
1132 1138
1133 1139
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory of *repo* to *node*.

    Returns True when unresolved file merges remain, False otherwise.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0


# 'update' is shadowed by a local name inside clone(); keep an alias
_update = update
1145 1151
1146 1152
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to *node*, discarding changes.

    Always returns False: a clean (forced) update can never leave
    unresolved file merges behind.
    """
    stats = mergemod.clean_update(repo[node])
    # a forced update must not produce merge conflicts
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False


# 'clean' is shadowed by a local name inside updatetotally(); keep an alias
_clean = clean
1158 1164
# The updatecheck strategies accepted by updatetotally(); any other
# configured value silently falls back to UPDATECHECK_LINEAR there.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1165 1171
1166 1172
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # reaching here means a caller passed an invalid value explicitly:
        # that is a programming error, not a configuration problem
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # a bookmark to move/activate); warn about other heads later
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # the dirty-wdir check happens here, so the actual update
                # below no longer needs to check anything
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        # adjust bookmark state to the new working directory parent
        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is not a bookmark: deactivate any current one
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1245 1251
1246 1252
def merge(ctx, force=False, remind=True, labels=None):
    """Merge the working directory with changeset *ctx*, resolving changes.

    Returns True when any file merge is left unresolved.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    unresolved = stats.unresolvedcount
    if unresolved:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved > 0
1268 1274
1269 1275
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to its first parent."""
    ms = mergestatemod.mergestate.read(repo)
    # with recorded conflicts, go back to the local side of the merge;
    # without conflicts no mergestate was stored, so '.' is the target
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update can never leave conflicts behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1283 1289
1284 1290
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the value of
    subreporecurse().
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        # graft the subrepository path onto the parent source location
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            # OS-specific path handling only applies to local repositories
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may replace `other` with a bundle repository and
        # hands back a new cleanup callable covering the bundle as well
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1346 1352
1347 1353
def incoming(ui, repo, source, opts, subpath=None):
    """Show new changesets found in *source*; return 0 when any were found."""

    def subreporecurse():
        # recurse into subrepositories when --subrepos was requested
        result = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for spath in sorted(wctx.substate):
                result = min(result, wctx.sub(spath).incoming(ui, source, opts))
        return result

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skipmerges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1377 1383
1378 1384
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the nodes missing from each destination in *dests*.

    Returns a pair ``(outgoing_revs, others)`` where ``outgoing_revs`` is
    the union of missing nodes across all destinations, sorted by local
    revision number, and ``others`` is the list of still-open peers, one
    per destination.  Callers are responsible for closing those peers.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            # Parse into a loop-local url object instead of rebinding
            # `subpath`: rebinding would feed a url object back into
            # urlutil.url() on the next destination and break multi-dest
            # pushes combined with a subrepository path.
            subpath_url = urlutil.url(subpath)
            if subpath_url.isabs():
                dest = bytes(subpath_url)
            else:
                p = urlutil.url(dest)
                # OS-specific path handling only applies to local repos
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, bytes(subpath_url)))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except: # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1421 1427
1422 1428
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing for each subrepository when --subrepos was requested."""
    ret = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for spath in sorted(wctx.substate):
            ret = min(ret, wctx.sub(spath).outgoing(ui, dests, opts))
    return ret
1431 1437
1432 1438
def _outgoing_filter(repo, revs, opts):
    """Apply the ordering/filtering options for outgoing to *revs*.

    Honors --newest-first (reverses *revs* in place), --no-merges and
    --limit, yielding the surviving nodes one at a time.
    """
    limit = logcmdutil.getlimit(opts)
    skip_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not skip_merges:
        # nothing to filter: stream the nodes straight through
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if skip_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1454 1460
1455 1461
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the given destinations.

    Returns 0 when outgoing changes were found, 1 otherwise (the value
    may also be lowered by the subrepository recursion).
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

        if opts.get(b'graph'):
            revdag = logcmdutil.graphrevs(repo, o, opts)
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(
                ui, repo, opts, buffered=True
            )
            logcmdutil.displaygraph(
                ui, repo, revdag, displayer, graphmod.asciiedges
            )
        else:
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            for n in _outgoing_filter(repo, o, opts):
                displayer.show(repo[n])
            displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret # 0 when outgoing changes were found, 1 otherwise
    finally:
        # close every peer opened by _outgoing()
        for oth in others:
            oth.close()
1487 1493
1488 1494
def verify(repo, level=None):
    """verify the consistency of a repository

    In addition to the core verification, this checks the .hgsubstate
    entries of all revisions and folds the subrepositories' own verify
    results into the returned status.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # a subrepo that cannot be opened (allowcreate=False)
                        # is reported as a warning rather than aborting
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1521 1527
1522 1528
def remoteui(src, opts):
    """Build a ui suitable for a remote peer from *src* (ui or repo) and opts."""
    if util.safehasattr(src, b'baseui'):
        # src is a repository: start from its base ui so repo-specific
        # configuration is dropped, but read options from the repo's ui
        dst = src.baseui.copy()
        src = src.ui
    else:
        # src is already a (global) ui object: keep all global options
        dst = src.copy()

    # ssh-specific options, from the command line or the source config
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b'ui', opt, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # selected local settings that matter when talking to the remote
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, value in src.configitems(section):
            dst.setconfig(section, key, value, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1551 1557
1552 1558
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (repo attribute naming a directory, filename);
# cachedlocalrepo._repostate() stats each one to fingerprint the repo.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'), # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'), # ! bookmark can change content at the same size
]
1562 1568
1563 1569
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint of the files of interest (see module-level `foi`)
        # plus the newest mtime observed among them
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # state changed: build a fresh repository object and re-apply
        # the original filter (if any)
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ``(state, maxmtime)`` where ``state`` is a tuple of
        (mtime, size) pairs for the files of interest and ``maxmtime`` is
        the newest mtime among them."""
        state = []
        maxmtime = -1
        for attr, fname in foi:
            # NOTE(review): `attr` is bytes; this relies on the getattr
            # imported from pycompat at the top of the file accepting
            # bytes attribute names -- confirm
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # the file may not exist (e.g. no obsstore yet); stat the
                # containing directory so changes are still noticed
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now