##// END OF EJS Templates
peer-or-repo: build a peer directly in the `peer` function...
marmoute -
r50587:c0acf544 default
parent child Browse files
Show More
@@ -1,1624 +1,1643 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import os
11 11 import posixpath
12 12 import shutil
13 13 import stat
14 14 import weakref
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 sha1nodeconstants,
20 20 short,
21 21 )
22 22 from .pycompat import getattr
23 23
24 24 from . import (
25 25 bookmarks,
26 26 bundlerepo,
27 27 cmdutil,
28 28 destutil,
29 29 discovery,
30 30 error,
31 31 exchange,
32 32 extensions,
33 33 graphmod,
34 34 httppeer,
35 35 localrepo,
36 36 lock,
37 37 logcmdutil,
38 38 logexchange,
39 39 merge as mergemod,
40 40 mergestate as mergestatemod,
41 41 narrowspec,
42 42 phases,
43 43 requirements,
44 44 scmutil,
45 45 sshpeer,
46 46 statichttprepo,
47 47 ui as uimod,
48 48 unionrepo,
49 49 url,
50 50 util,
51 51 verify as verifymod,
52 52 vfs as vfsmod,
53 53 )
54 54 from .interfaces import repository as repositorymod
55 55 from .utils import (
56 56 hashutil,
57 57 stringutil,
58 58 urlutil,
59 59 )
60 60
61 61
# convenience alias: release one or more locks without importing `lock`
release = lock.release

# shared features
# feature name added to `shareditems` by share() when bookmarks are shared
sharedbookmarks = b'bookmarks'
67 67
def addbranchrevs(lrepo, other, branches, revs):
    """resolve branch names in ``branches`` against the peer ``other``

    ``branches`` is a ``(hashbranch, branches)`` pair.  Returns a
    ``(revs, checkout)`` pair: ``revs`` is the input revision list
    extended with the resolved branch heads (hex node strings), and
    ``checkout`` is the first entry of that list (or None when there is
    nothing to resolve).

    Raises ``error.Abort`` when branch lookup is needed but the peer
    lacks the ``branchmap`` capability, and ``error.RepoLookupError``
    for an unknown branch name.
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing to resolve: pass the caller's revs straight through
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # old server without branchmap: the best we can do is treat the
        # fragment as a plain revision identifier
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # append the heads of `branch` to revs (newest first);
        # return True if the branch exists in the remote branchmap
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # the URL fragment may be a branch name or a bare revision;
        # fall back to treating it as a revision when it is no branch
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
110 110
111 111
112 112 def _isfile(path):
113 113 try:
114 114 # we use os.stat() directly here instead of os.path.isfile()
115 115 # because the latter started returning `False` on invalid path
116 116 # exceptions starting in 3.8 and we care about handling
117 117 # invalid paths specially here.
118 118 st = os.stat(path)
119 119 except ValueError as e:
120 120 msg = stringutil.forcebytestr(e)
121 121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
122 122 except OSError:
123 123 return False
124 124 else:
125 125 return stat.S_ISREG(st.st_mode)
126 126
127 127
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # bundle files are regular files; anything else is a local repo
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        # a plain file is a bundle, a directory is a local repository
        expanded = util.expandpath(urlutil.urllocalpath(path))
        factory = bundlerepo if _isfile(expanded) else localrepo
        return factory.instance(ui, expanded, *args, **kwargs)
144 144
145 145
# URL scheme -> module/factory able to instantiate a repository object
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL scheme -> module able to instantiate a remote peer
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
158 158
159 159
def _peerlookup(path):
    """Return the module/factory handling ``path``'s URL scheme.

    Peer schemes win over repo schemes; schemeless or unknown paths
    fall back to ``LocalFactory``.
    """
    scheme = urlutil.url(path).scheme or b'file'
    for registry in (peer_schemes, repo_schemes):
        if scheme in registry:
            return registry[scheme]
    return LocalFactory
168 168
169 169
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # repo objects should answer this themselves
        repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
        return repo.local()
    cls = _peerlookup(repo)
    cls.instance  # make sure we load the module
    if util.safehasattr(cls, 'islocal'):
        return cls.islocal(repo)  # pytype: disable=module-attr
    return False
180 180
181 181
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    u = urlutil.url(path, parsequery=False, parsefragment=False)
    if not u.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(u.localpath(), b'rb')
189 189
190 190
# a list of (ui, repo) functions called for wire peer initialization
# (run by _setup_repo_or_peer on every non-local peer)
wirepeersetupfuncs = []
193 193
194 194
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    factory = _peerlookup(path)
    obj = factory.instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # run presetup functions and extension reposetup hooks on the result
    _setup_repo_or_peer(ui, obj, presetupfuncs)
    return obj
203 203
204 204
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """run setup callbacks and extension reposetup hooks on ``obj``

    ``obj`` may be a repository or a peer; ``wirepeersetupfuncs`` are
    only run when ``obj.local()`` is falsy (i.e. for remote peers).
    """
    # prefer the object's own ui, which may carry repo-level configuration
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                # time each hook individually for diagnostics
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
223 223
224 224
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path

    Aborts when the path resolves to something that is not a local
    repository.  The returned repository is filtered to 'visible'.
    """
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = obj.local()
    if not repo:
        msg = _(b"repository '%s' is not local") % (path or obj.url())
        raise error.Abort(msg)
    return repo.filtered(b'visible')
248 248
249 249
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path

    For schemes with a dedicated peer class (http(s), ssh, static-http)
    the peer is built directly; anything else is opened as a repository
    and its ``peer()`` is returned.
    '''
    # NOTE: the merged diff residue left both the old one-liner
    # (`return _peerorrepo(...).peer()`) and the new implementation in this
    # function; the early return made the new code unreachable.  Only the
    # post-refactor implementation is kept here.
    rui = remoteui(uiorrepo, opts)
    scheme = urlutil.url(path).scheme
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
        peer = cls.instance(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        # run presetup/reposetup hooks on the freshly built peer
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo = repository(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        peer = repo.peer()
    return peer
257 276
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    # use only the path component of the URL; empty path -> empty dest
    path = urlutil.url(source).path
    return os.path.basename(os.path.normpath(path)) if path else b''
278 297
279 298
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share: sharedpath points back at our own store
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        # already resolved and cached on the repo object
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    # cache for subsequent calls
    repo.srcrepo = srcrepo
    return srcrepo
297 316
298 317
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a path (bytes) or a repo/peer object with a
    ``local()`` method.  ``dest`` defaults to the basename of the
    source.  Returns the newly created shared repository object.
    '''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        # path given: open the source and resolve any #branch fragment
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    # the createopts drive localrepo's share-aware creation path
    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen to pick up state written by postshare
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
350 369
351 370
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # nothing to copy from the share source
        return

    currentconfig = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        # source config first so local settings keep overriding it
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
372 391
373 392
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        # copystore may return None when no data file needed locking
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around for forensics/rollback by hand
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
420 439
421 440
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    # the share inherits the source's default path unless overridden
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow shares also need the narrowspec in the working copy
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
438 457
439 458
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the explicit checkout first, then sensible fallbacks
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        else:
            break
    _update(repo, uprev)
460 479
461 480
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # hardlink starts unknown (None); util.copyfiles decides whether
        # hardlinking works and reports the choice back for later files
        hardlink = None
        # with hardlink=None this always starts as 'copying'
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # a publishing repo has only public phases: no phaseroots
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # make sure the destination lock is not leaked on failure
        release(destlock)
        raise
500 519
501 520
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns a ``(srcpeer, destpeer)`` pair like clone().
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
607 626
608 627
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src = srcrepo.cachevfs.join(fname)
    if not os.path.exists(src):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
618 637
619 638
620 639 def clone(
621 640 ui,
622 641 peeropts,
623 642 source,
624 643 dest=None,
625 644 pull=False,
626 645 revs=None,
627 646 update=True,
628 647 stream=False,
629 648 branch=None,
630 649 shareopts=None,
631 650 storeincludepats=None,
632 651 storeexcludepats=None,
633 652 depth=None,
634 653 ):
635 654 """Make a copy of an existing repository.
636 655
637 656 Create a copy of an existing repository in a new directory. The
638 657 source and destination are URLs, as passed to the repository
639 658 function. Returns a pair of repository peers, the source and
640 659 newly created destination.
641 660
642 661 The location of the source is added to the new repository's
643 662 .hg/hgrc file, as the default to be used for future pulls and
644 663 pushes.
645 664
646 665 If an exception is raised, the partly cloned/updated destination
647 666 repository will be deleted.
648 667
649 668 Arguments:
650 669
651 670 source: repository object or URL
652 671
653 672 dest: URL of destination repository to create (defaults to base
654 673 name of source repository)
655 674
656 675 pull: always pull from source repository, even in local case or if the
657 676 server prefers streaming
658 677
659 678 stream: stream raw data uncompressed from repository (fast over
660 679 LAN, slow over WAN)
661 680
662 681 revs: revision to clone up to (implies pull=True)
663 682
664 683 update: update working directory after clone completes, if
665 684 destination is local repository (True means update to default rev,
666 685 anything else is treated as a revision)
667 686
668 687 branch: branches to clone
669 688
670 689 shareopts: dict of options to control auto sharing behavior. The "pool" key
671 690 activates auto sharing mode and defines the directory for stores. The
672 691 "mode" key determines how to construct the directory name of the shared
673 692 repository. "identity" means the name is derived from the node of the first
674 693 changeset in the repository. "remote" means the name is derived from the
675 694 remote's path/URL. Defaults to "identity."
676 695
677 696 storeincludepats and storeexcludepats: sets of file patterns to include and
678 697 exclude in the repository copy, respectively. If not defined, all files
679 698 will be included (a "full" clone). Otherwise a "narrow" clone containing
680 699 only the requested files will be performed. If ``storeincludepats`` is not
681 700 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
682 701 ``path:.``. If both are empty sets, no files will be cloned.
683 702 """
684 703
685 704 if isinstance(source, bytes):
686 705 src = urlutil.get_clone_path(ui, source, branch)
687 706 origsource, source, branches = src
688 707 srcpeer = peer(ui, peeropts, source)
689 708 else:
690 709 srcpeer = source.peer() # in case we were called with a localrepo
691 710 branches = (None, branch or [])
692 711 origsource = source = srcpeer.url()
693 712 srclock = destlock = destwlock = cleandir = None
694 713 destpeer = None
695 714 try:
696 715 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
697 716
698 717 if dest is None:
699 718 dest = defaultdest(source)
700 719 if dest:
701 720 ui.status(_(b"destination directory: %s\n") % dest)
702 721 else:
703 722 dest = urlutil.get_clone_path(ui, dest)[0]
704 723
705 724 dest = urlutil.urllocalpath(dest)
706 725 source = urlutil.urllocalpath(source)
707 726
708 727 if not dest:
709 728 raise error.InputError(_(b"empty destination path is not valid"))
710 729
711 730 destvfs = vfsmod.vfs(dest, expandpath=True)
712 731 if destvfs.lexists():
713 732 if not destvfs.isdir():
714 733 raise error.InputError(
715 734 _(b"destination '%s' already exists") % dest
716 735 )
717 736 elif destvfs.listdir():
718 737 raise error.InputError(
719 738 _(b"destination '%s' is not empty") % dest
720 739 )
721 740
722 741 createopts = {}
723 742 narrow = False
724 743
725 744 if storeincludepats is not None:
726 745 narrowspec.validatepatterns(storeincludepats)
727 746 narrow = True
728 747
729 748 if storeexcludepats is not None:
730 749 narrowspec.validatepatterns(storeexcludepats)
731 750 narrow = True
732 751
733 752 if narrow:
734 753 # Include everything by default if only exclusion patterns defined.
735 754 if storeexcludepats and not storeincludepats:
736 755 storeincludepats = {b'path:.'}
737 756
738 757 createopts[b'narrowfiles'] = True
739 758
740 759 if depth:
741 760 createopts[b'shallowfilestore'] = True
742 761
743 762 if srcpeer.capable(b'lfs-serve'):
744 763 # Repository creation honors the config if it disabled the extension, so
745 764 # we can't just announce that lfs will be enabled. This check avoids
746 765 # saying that lfs will be enabled, and then saying it's an unknown
747 766 # feature. The lfs creation option is set in either case so that a
748 767 # requirement is added. If the extension is explicitly disabled but the
749 768 # requirement is set, the clone aborts early, before transferring any
750 769 # data.
751 770 createopts[b'lfs'] = True
752 771
753 772 if extensions.disabled_help(b'lfs'):
754 773 ui.status(
755 774 _(
756 775 b'(remote is using large file support (lfs), but it is '
757 776 b'explicitly disabled in the local configuration)\n'
758 777 )
759 778 )
760 779 else:
761 780 ui.status(
762 781 _(
763 782 b'(remote is using large file support (lfs); lfs will '
764 783 b'be enabled for this repository)\n'
765 784 )
766 785 )
767 786
768 787 shareopts = shareopts or {}
769 788 sharepool = shareopts.get(b'pool')
770 789 sharenamemode = shareopts.get(b'mode')
771 790 if sharepool and islocal(dest):
772 791 sharepath = None
773 792 if sharenamemode == b'identity':
774 793 # Resolve the name from the initial changeset in the remote
775 794 # repository. This returns nullid when the remote is empty. It
776 795 # raises RepoLookupError if revision 0 is filtered or otherwise
777 796 # not available. If we fail to resolve, sharing is not enabled.
778 797 try:
779 798 with srcpeer.commandexecutor() as e:
780 799 rootnode = e.callcommand(
781 800 b'lookup',
782 801 {
783 802 b'key': b'0',
784 803 },
785 804 ).result()
786 805
787 806 if rootnode != sha1nodeconstants.nullid:
788 807 sharepath = os.path.join(sharepool, hex(rootnode))
789 808 else:
790 809 ui.status(
791 810 _(
792 811 b'(not using pooled storage: '
793 812 b'remote appears to be empty)\n'
794 813 )
795 814 )
796 815 except error.RepoLookupError:
797 816 ui.status(
798 817 _(
799 818 b'(not using pooled storage: '
800 819 b'unable to resolve identity of remote)\n'
801 820 )
802 821 )
803 822 elif sharenamemode == b'remote':
804 823 sharepath = os.path.join(
805 824 sharepool, hex(hashutil.sha1(source).digest())
806 825 )
807 826 else:
808 827 raise error.Abort(
809 828 _(b'unknown share naming mode: %s') % sharenamemode
810 829 )
811 830
812 831 # TODO this is a somewhat arbitrary restriction.
813 832 if narrow:
814 833 ui.status(
815 834 _(b'(pooled storage not supported for narrow clones)\n')
816 835 )
817 836 sharepath = None
818 837
819 838 if sharepath:
820 839 return clonewithshare(
821 840 ui,
822 841 peeropts,
823 842 sharepath,
824 843 source,
825 844 srcpeer,
826 845 dest,
827 846 pull=pull,
828 847 rev=revs,
829 848 update=update,
830 849 stream=stream,
831 850 )
832 851
833 852 srcrepo = srcpeer.local()
834 853
835 854 abspath = origsource
836 855 if islocal(origsource):
837 856 abspath = util.abspath(urlutil.urllocalpath(origsource))
838 857
839 858 if islocal(dest):
840 859 if os.path.exists(dest):
841 860 # only clean up directories we create ourselves
842 861 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
843 862 cleandir = hgdir
844 863 else:
845 864 cleandir = dest
846 865
847 866 copy = False
848 867 if (
849 868 srcrepo
850 869 and srcrepo.cancopy()
851 870 and islocal(dest)
852 871 and not phases.hassecret(srcrepo)
853 872 ):
854 873 copy = not pull and not revs
855 874
856 875 # TODO this is a somewhat arbitrary restriction.
857 876 if narrow:
858 877 copy = False
859 878
860 879 if copy:
861 880 try:
862 881 # we use a lock here because if we race with commit, we
863 882 # can end up with extra data in the cloned revlogs that's
864 883 # not pointed to by changesets, thus causing verify to
865 884 # fail
866 885 srclock = srcrepo.lock(wait=False)
867 886 except error.LockError:
868 887 copy = False
869 888
870 889 if copy:
871 890 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
872 891
873 892 destrootpath = urlutil.urllocalpath(dest)
874 893 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
875 894 localrepo.createrepository(
876 895 ui,
877 896 destrootpath,
878 897 requirements=dest_reqs,
879 898 )
880 899 destrepo = localrepo.makelocalrepository(ui, destrootpath)
881 900
882 901 destwlock = destrepo.wlock()
883 902 destlock = destrepo.lock()
884 903 from . import streamclone # avoid cycle
885 904
886 905 streamclone.local_copy(srcrepo, destrepo)
887 906
888 907 # we need to re-init the repo after manually copying the data
889 908 # into it
890 909 destpeer = peer(srcrepo, peeropts, dest)
891 910
892 911 # make the peer aware that is it already locked
893 912 #
894 913 # important:
895 914 #
896 915 # We still need to release that lock at the end of the function
897 916 destpeer.local()._lockref = weakref.ref(destlock)
898 917 destpeer.local()._wlockref = weakref.ref(destwlock)
899 918 # dirstate also needs to be copied because `_wlockref` has a reference
900 919 # to it: this dirstate is saved to disk when the wlock is released
901 920 destpeer.local().dirstate = destrepo.dirstate
902 921
903 922 srcrepo.hook(
904 923 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
905 924 )
906 925 else:
907 926 try:
908 927 # only pass ui when no srcrepo
909 928 destpeer = peer(
910 929 srcrepo or ui,
911 930 peeropts,
912 931 dest,
913 932 create=True,
914 933 createopts=createopts,
915 934 )
916 935 except FileExistsError:
917 936 cleandir = None
918 937 raise error.Abort(_(b"destination '%s' already exists") % dest)
919 938
920 939 if revs:
921 940 if not srcpeer.capable(b'lookup'):
922 941 raise error.Abort(
923 942 _(
924 943 b"src repository does not support "
925 944 b"revision lookup and so doesn't "
926 945 b"support clone by revision"
927 946 )
928 947 )
929 948
930 949 # TODO this is batchable.
931 950 remoterevs = []
932 951 for rev in revs:
933 952 with srcpeer.commandexecutor() as e:
934 953 remoterevs.append(
935 954 e.callcommand(
936 955 b'lookup',
937 956 {
938 957 b'key': rev,
939 958 },
940 959 ).result()
941 960 )
942 961 revs = remoterevs
943 962
944 963 checkout = revs[0]
945 964 else:
946 965 revs = None
947 966 local = destpeer.local()
948 967 if local:
949 968 if narrow:
950 969 with local.wlock(), local.lock():
951 970 local.setnarrowpats(storeincludepats, storeexcludepats)
952 971 narrowspec.copytoworkingcopy(local)
953 972
954 973 u = urlutil.url(abspath)
955 974 defaulturl = bytes(u)
956 975 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
957 976 if not stream:
958 977 if pull:
959 978 stream = False
960 979 else:
961 980 stream = None
962 981 # internal config: ui.quietbookmarkmove
963 982 overrides = {(b'ui', b'quietbookmarkmove'): True}
964 983 with local.ui.configoverride(overrides, b'clone'):
965 984 exchange.pull(
966 985 local,
967 986 srcpeer,
968 987 heads=revs,
969 988 streamclonerequested=stream,
970 989 includepats=storeincludepats,
971 990 excludepats=storeexcludepats,
972 991 depth=depth,
973 992 )
974 993 elif srcrepo:
975 994 # TODO lift restriction once exchange.push() accepts narrow
976 995 # push.
977 996 if narrow:
978 997 raise error.Abort(
979 998 _(
980 999 b'narrow clone not available for '
981 1000 b'remote destinations'
982 1001 )
983 1002 )
984 1003
985 1004 exchange.push(
986 1005 srcrepo,
987 1006 destpeer,
988 1007 revs=revs,
989 1008 bookmarks=srcrepo._bookmarks.keys(),
990 1009 )
991 1010 else:
992 1011 raise error.Abort(
993 1012 _(b"clone from remote to remote not supported")
994 1013 )
995 1014
996 1015 cleandir = None
997 1016
998 1017 destrepo = destpeer.local()
999 1018 if destrepo:
1000 1019 template = uimod.samplehgrcs[b'cloned']
1001 1020 u = urlutil.url(abspath)
1002 1021 u.passwd = None
1003 1022 defaulturl = bytes(u)
1004 1023 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1005 1024 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1006 1025
1007 1026 if ui.configbool(b'experimental', b'remotenames'):
1008 1027 logexchange.pullremotenames(destrepo, srcpeer)
1009 1028
1010 1029 if update:
1011 1030 if update is not True:
1012 1031 with srcpeer.commandexecutor() as e:
1013 1032 checkout = e.callcommand(
1014 1033 b'lookup',
1015 1034 {
1016 1035 b'key': update,
1017 1036 },
1018 1037 ).result()
1019 1038
1020 1039 uprev = None
1021 1040 status = None
1022 1041 if checkout is not None:
1023 1042 # Some extensions (at least hg-git and hg-subversion) have
1024 1043 # a peer.lookup() implementation that returns a name instead
1025 1044 # of a nodeid. We work around it here until we've figured
1026 1045 # out a better solution.
1027 1046 if len(checkout) == 20 and checkout in destrepo:
1028 1047 uprev = checkout
1029 1048 elif scmutil.isrevsymbol(destrepo, checkout):
1030 1049 uprev = scmutil.revsymbol(destrepo, checkout).node()
1031 1050 else:
1032 1051 if update is not True:
1033 1052 try:
1034 1053 uprev = destrepo.lookup(update)
1035 1054 except error.RepoLookupError:
1036 1055 pass
1037 1056 if uprev is None:
1038 1057 try:
1039 1058 if destrepo._activebookmark:
1040 1059 uprev = destrepo.lookup(destrepo._activebookmark)
1041 1060 update = destrepo._activebookmark
1042 1061 else:
1043 1062 uprev = destrepo._bookmarks[b'@']
1044 1063 update = b'@'
1045 1064 bn = destrepo[uprev].branch()
1046 1065 if bn == b'default':
1047 1066 status = _(b"updating to bookmark %s\n" % update)
1048 1067 else:
1049 1068 status = (
1050 1069 _(b"updating to bookmark %s on branch %s\n")
1051 1070 ) % (update, bn)
1052 1071 except KeyError:
1053 1072 try:
1054 1073 uprev = destrepo.branchtip(b'default')
1055 1074 except error.RepoLookupError:
1056 1075 uprev = destrepo.lookup(b'tip')
1057 1076 if not status:
1058 1077 bn = destrepo[uprev].branch()
1059 1078 status = _(b"updating to branch %s\n") % bn
1060 1079 destrepo.ui.status(status)
1061 1080 _update(destrepo, uprev)
1062 1081 if update in destrepo._bookmarks:
1063 1082 bookmarks.activate(destrepo, update)
1064 1083 if destlock is not None:
1065 1084 release(destlock)
1066 1085 if destwlock is not None:
1067 1086 release(destlock)
1068 1087 # here is a tiny windows were someone could end up writing the
1069 1088 # repository before the cache are sure to be warm. This is "fine"
1070 1089 # as the only "bad" outcome would be some slowness. That potential
1071 1090 # slowness already affect reader.
1072 1091 with destrepo.lock():
1073 1092 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1074 1093 finally:
1075 1094 release(srclock, destlock, destwlock)
1076 1095 if cleandir is not None:
1077 1096 shutil.rmtree(cleandir, True)
1078 1097 if srcpeer is not None:
1079 1098 srcpeer.close()
1080 1099 if destpeer and destpeer.local() is None:
1081 1100 destpeer.close()
1082 1101 return srcpeer, destpeer
1083 1102
1084 1103
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge ``stats`` on ``repo.ui``.

    When ``quietempty`` is true and the stats are all zero, print nothing.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1100 1119
1101 1120
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    Deprecated entry point: callers should prefer merge.update() or
    merge.clean_update(). When ``overwrite`` is set, working-directory
    changes are clobbered; otherwise they are merged into the destination.

    Returns stats (see pydoc mercurial.merge.applyupdates).
    """
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1120 1139
1121 1140
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node

    Returns True when unresolved file merges remain, False otherwise.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1129 1148
1130 1149
# clone() above uses a local variable named ``update``, so keep the
# function reachable under an alias (naming conflict in clone())
_update = update
1133 1152
1134 1153
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, discarding any
    local changes (clean update)."""
    stats = mergemod.clean_update(repo[node])
    # a clean update clobbers everything, so it can never leave conflicts
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    # mirror update()'s return convention: no conflicts were detected
    return False
1142 1161
1143 1162
# updatetotally() below takes a parameter named ``clean``, so keep the
# function reachable under an alias (naming conflict in updatetotally())
_clean = clean
1146 1165
# the set of values accepted for updatetotally()'s ``updatecheck`` argument
# and the ``commands.update.check`` config option (see the UPDATECHECK_*
# constants defined in the merge module)
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1153 1172
1154 1173
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicitly passed-in value must be one of the known constants
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        # movemarkfrom: old node of the active bookmark, when the bookmark
        # should follow the update (only set by destupdate below)
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one and tell us
            # whether the active bookmark should move along
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # abort early on a dirty working directory, then fall back
                # to an unchecked update (nothing left to merge)
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            # successful update: advance or deactivate the active bookmark
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination was named by a bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination named by something else: drop the active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1233 1252
1234 1253
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        # successful merge: nudge the user to commit the result
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return has_conflicts
1256 1275
1257 1276
def abortmerge(ui, repo):
    """Abort an in-progress merge, cleanly updating back to its first parent."""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts: the pre-merge parent is recorded in mergestate
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored; fall back to '.'
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1271 1290
1272 1291
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed; when no
    changes are found, returns whatever ``subreporecurse()`` returns.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        # restrict the comparison to a subrepository path: either an
        # absolute URL, or a path joined onto the main source
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    # cleanupfn may be rebound below by getremotechanges(); the initial
    # value just closes the peer
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # may replace ``other`` with a (local) bundle repo and install a
        # richer cleanup function
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        # always release the peer/bundle resources, even on the early return
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1334 1353
1335 1354
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``.

    Returns 0 when incoming changes were found, 1 otherwise (possibly
    lowered by subrepository results).
    """

    def subreporecurse():
        result = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for spath in sorted(wctx.substate):
                result = min(result, wctx.sub(spath).incoming(ui, source, opts))
        return result

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skip_merges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if skip_merges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1365 1384
1366 1385
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the changesets missing from the given destinations.

    Returns a pair ``(outgoing_revs, others)``: the union of missing nodes
    across all destinations, sorted by local revision number, and the list
    of peers that were contacted. The peers are left open; callers are
    responsible for closing them.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # NOTE(review): ``subpath`` is rebound to a url object here, so a
            # second loop iteration (multiple dests) sees the converted value
            # rather than the original bytes -- confirm this is intended.
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            # only close the peer on failure; successful peers are returned
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1409 1428
1410 1429
def _outgoing_recurse(ui, repo, dests, opts):
    """Run ``outgoing`` on every subrepository when --subrepos is set.

    Returns the minimum of the subrepo exit codes, starting from 1
    (i.e. 0 when any subrepo has outgoing changes).
    """
    result = 1
    if not opts.get(b'subrepos'):
        return result
    wctx = repo[None]
    for spath in sorted(wctx.substate):
        result = min(result, wctx.sub(spath).outgoing(ui, dests, opts))
    return result
1419 1438
1420 1439
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing

    Yields nodes from ``revs`` honoring --newest-first, --limit and
    --no-merges. Note: ``revs`` is reversed in place for --newest-first.
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: pass everything through
        yield from revs
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1442 1461
1443 1462
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the given destination(s).

    Returns 0 when outgoing changes were found, 1 otherwise (possibly
    lowered by subrepository results via ``_outgoing_recurse``).
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                # --graph: render an ASCII DAG of the outgoing changesets
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # hooks and subrepo recursion run even when nothing is outgoing
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # close every peer opened by _outgoing()
        for oth in others:
            oth.close()
1475 1494
1476 1495
def verify(repo, level=None):
    """verify the consistency of a repository

    Runs the main store verification, then additionally checks that every
    revision touching ``.hgsubstate`` has verifiable subrepo references.
    Returns a nonzero value when problems were found.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        # NOTE(review): %s-formatting the exception into bytes
                        # relies on the error type being bytes-formattable
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # substate parsing itself blew up: report and keep going
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1509 1528
1510 1529
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # src looks like a repository: start from its base ui so that
        # repo-specific configuration is dropped
        dst = src.baseui.copy()
        src = src.ui
    else:
        # assume src is a global ui object: keep all global options
        dst = src.copy()

    # ssh-specific options; command-line values win over local config
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # bundle-specific options
    mainreporoot = src.config(b'bundle', b'mainreporoot')
    if mainreporoot:
        dst.setconfig(b'bundle', b'mainreporoot', mainreporoot, b'copied')

    # selected local settings that the remote side's ui must also see
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, value in src.configitems(section):
            dst.setconfig(section, key, value, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1539 1558
1540 1559
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (localrepository attribute holding a directory path,
# file name relative to that directory); consumed by
# cachedlocalrepo._repostate() below.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1550 1569
1551 1570
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.
        state, mtime = self._repostate()
        if state == self._state:
            # nothing of interest changed on disk; reuse the cached repo
            return self._repo, False

        # something changed: build a fresh repository object and re-apply
        # the original filter (if any)
        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = state
        self.mtime = mtime
        return self._repo, True

    def _repostate(self):
        """Return ((mtime, size) for each file of interest, newest mtime)."""
        entries = []
        newest = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            target = os.path.join(prefix, fname)
            try:
                st = os.stat(target)
            except OSError:
                # the file may not exist (e.g. no obsstore yet); fall back
                # to the containing directory so we still notice changes
                st = os.stat(prefix)
            mtime = st[stat.ST_MTIME]
            entries.append((mtime, st.st_size))
            if mtime > newest:
                newest = mtime
        return tuple(entries), newest

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        fresh = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            fresh = fresh.filtered(self._filtername)
        else:
            fresh = fresh.unfiltered()
        duplicate = cachedlocalrepo(fresh)
        duplicate._state = self._state
        duplicate.mtime = self.mtime
        return duplicate
General Comments 0
You need to be logged in to leave comments. Login now