##// END OF EJS Templates
subrepo: propagate non-default path on outgoing...
Felipe Resende -
r52886:3e0f86f0 stable
parent child Browse files
Show More
@@ -1,1680 +1,1682 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import os
11 11 import posixpath
12 12 import shutil
13 13 import stat
14 14 import weakref
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 sha1nodeconstants,
20 20 short,
21 21 )
22 22
23 23 from . import (
24 24 bookmarks,
25 25 bundlerepo,
26 26 cmdutil,
27 27 destutil,
28 28 discovery,
29 29 error,
30 30 exchange,
31 31 extensions,
32 32 graphmod,
33 33 httppeer,
34 34 localrepo,
35 35 lock,
36 36 logcmdutil,
37 37 logexchange,
38 38 merge as mergemod,
39 39 mergestate as mergestatemod,
40 40 narrowspec,
41 41 phases,
42 42 requirements,
43 43 scmutil,
44 44 sshpeer,
45 45 statichttprepo,
46 46 ui as uimod,
47 47 unionrepo,
48 48 url,
49 49 util,
50 50 verify as verifymod,
51 51 vfs as vfsmod,
52 52 )
53 53 from .interfaces import repository as repositorymod
54 54 from .utils import (
55 55 hashutil,
56 56 stringutil,
57 57 urlutil,
58 58 )
59 59
60 60
61 61 release = lock.release
62 62
63 63 # shared features
64 64 sharedbookmarks = b'bookmarks'
65 65
66 66
def addbranchrevs(lrepo, other, branches, revs, remotehidden=False):
    """Resolve branch names from a URL ``#fragment`` into concrete revisions.

    ``branches`` is a ``(hashbranch, branches)`` pair.  Returns a
    ``(revs, checkout)`` pair: the augmented revision list and the revision
    to check out (or None).
    """
    if hasattr(other, 'peer'):
        # a courtesy to callers using a localrepo for other
        peer = other.peer(remotehidden=remotehidden)
    else:
        peer = other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch requested: pass the revisions through untouched
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        # fall back to treating the fragment as a plain revision identifier
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # resolve a branch name to its heads; b'.' means the branch the
        # local dirstate is currently on
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a known branch name: assume it is a raw revision/hash
            revs.append(hashbranch)
    return revs, revs[0]
113 113
114 114
115 115 def _isfile(path):
116 116 try:
117 117 # we use os.stat() directly here instead of os.path.isfile()
118 118 # because the latter started returning `False` on invalid path
119 119 # exceptions starting in 3.8 and we care about handling
120 120 # invalid paths specially here.
121 121 st = os.stat(path)
122 122 except ValueError as e:
123 123 msg = stringutil.forcebytestr(e)
124 124 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
125 125 except OSError:
126 126 return False
127 127 else:
128 128 return stat.S_ISREG(st.st_mode)
129 129
130 130
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a plain on-disk file is a bundle, which we do not treat as local
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        # files get the bundle repository class, directories the local one
        expanded = util.expandpath(urlutil.urllocalpath(path))
        factory = bundlerepo if _isfile(expanded) else localrepo
        return factory.instance(ui, expanded, *args, **kwargs)
147 147
148 148
# URL scheme -> module providing repository objects for that scheme;
# consulted by repository() below
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL scheme -> module providing a remote peer for that scheme;
# consulted by peer() below
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
161 161
162 162
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # repo objects know the answer themselves; this entry point is
        # deprecated for them
        repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
        return repo.local()
    scheme = urlutil.url(repo).scheme or b'file'
    if scheme in peer_schemes:
        handler = peer_schemes[scheme]
        handler.make_peer  # make sure we load the module
    elif scheme in repo_schemes:
        handler = repo_schemes[scheme]
        handler.instance  # make sure we load the module
    else:
        handler = LocalFactory
    if hasattr(handler, 'islocal'):
        return handler.islocal(repo)  # pytype: disable=module-attr
    return False
181 181
182 182
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    parsed = urlutil.url(path, parsequery=False, parsefragment=False)
    if not parsed.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(parsed.localpath(), b'rb')
190 190
191 191
# a list of (ui, repo) functions called for wire peer initialization
# (extensions append to this; run by _setup_repo_or_peer for non-local peers)
wirepeersetupfuncs = []
194 194
195 195
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run setup hooks on a freshly created repo or peer object.

    ``presetupfuncs`` (callables taking ``(ui, obj)``) run first, then each
    loaded extension's ``reposetup`` hook, and finally — for non-local
    objects only — the registered ``wirepeersetupfuncs``.
    """
    # prefer the object's own ui: it may carry repo-specific configuration
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
214 214
215 215
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    # scheme-less paths are plain filesystem paths
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    cls = repo_schemes.get(scheme)
    if cls is None:
        if scheme in peer_schemes:
            # remote schemes can only provide peers, never repository objects
            raise error.Abort(_(b"repository '%s' is not local") % path)
        cls = LocalFactory
    repo = cls.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    # hand back the "visible" view so hidden changesets are filtered out
    return repo.filtered(b'visible')
242 242
243 243
def peer(
    uiorrepo,
    opts,
    path,
    create=False,
    intents=None,
    createopts=None,
    remotehidden=False,
):
    '''return a repository peer for the specified path'''
    ui = getattr(uiorrepo, 'ui', uiorrepo)
    # ui copy with remote-related options from ``opts`` applied
    rui = remoteui(uiorrepo, opts)
    if hasattr(path, 'url'):
        # this is already a urlutil.path object
        peer_path = path
    else:
        peer_path = urlutil.path(ui, None, rawloc=path, validate_path=False)
    scheme = peer_path.url.scheme  # pytype: disable=attribute-error
    if scheme in peer_schemes:
        # genuinely remote: build a wire peer for the scheme
        cls = peer_schemes[scheme]
        peer = cls.make_peer(
            rui,
            peer_path,
            create,
            intents=intents,
            createopts=createopts,
            remotehidden=remotehidden,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo_path = peer_path.loc  # pytype: disable=attribute-error
        if not repo_path:
            repo_path = peer_path.rawloc  # pytype: disable=attribute-error
        repo = repository(
            rui,
            repo_path,
            create,
            intents=intents,
            createopts=createopts,
        )
        peer = repo.peer(path=peer_path, remotehidden=remotehidden)
    return peer
287 287
288 288
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # empty path (root URL, bare host, empty source) yields no destination
    return os.path.basename(os.path.normpath(path)) if path else b''
309 309
310 310
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share at all
        return None

    cached = getattr(repo, 'srcrepo', None)
    if cached:
        return cached

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
328 328
329 329
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    not_local_msg = _(b'can only share local repositories')
    if hasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        # resolve the source URL and its optional #branch fragment
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen so the repo object reflects the share data written by postshare
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
382 382
383 383
def _prependsourcehgrc(repo):
    """Copy the share source's config in front of this repo's .hg/hgrc.

    Used on unshare, and only when the share was performed with the
    share-safe method (where the source's config is shared with shares).
    """
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # nothing to carry over from the source
        return

    currentconfig = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        fp.write(b"# Config copied from shared source\n")
        fp.write(srcvfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(currentconfig)
404 404
405 405
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around (renamed) rather than deleting it,
            # so a trace of the previous share remains on disk
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the stale repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
452 452
453 453
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    # give the share a usable default path (explicit override wins over the
    # source's configured default)
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow shares need the narrowspec mirrored into their working copy
        with destrepo.wlock(), destrepo.lock(), destrepo.transaction(
            b"narrow-share"
        ):
            narrowspec.copytoworkingcopy(destrepo)
472 472
473 473
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the requested revision first, then fall back to b'default'/b'tip';
    # uprev keeps the value from the first lookup that succeeds
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
494 494
495 495
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # NOTE(review): hardlink starts as None, so the initial progress
        # topic is always b'copying'; util.copyfiles updates `hardlink`
        # as it goes -- confirm whether the b'linking' topic is reachable
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # publishing repos do not carry over phase data
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # the caller never sees the lock on failure, so release it here
        release(destlock)
        raise
534 534
535 535
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    # one lock file per pooled repo, keyed on its basename
    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
641 641
642 642
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)

    Missing source caches are silently skipped; the destination directory is
    created on demand.
    """
    srcfname = srcrepo.cachevfs.join(fname)
    dstfname = os.path.join(dstcachedir, fname)
    if os.path.exists(srcfname):
        # EAFP: another process may create the directory between an
        # existence check and mkdir, so tolerate FileExistsError instead
        # (same pattern as the pool-directory creation in clonewithshare)
        try:
            os.mkdir(dstcachedir)
        except FileExistsError:
            pass
        util.copyfile(srcfname, dstfname)
652 652
653 653
654 654 def clone(
655 655 ui,
656 656 peeropts,
657 657 source,
658 658 dest=None,
659 659 pull=False,
660 660 revs=None,
661 661 update=True,
662 662 stream=False,
663 663 branch=None,
664 664 shareopts=None,
665 665 storeincludepats=None,
666 666 storeexcludepats=None,
667 667 depth=None,
668 668 ):
669 669 """Make a copy of an existing repository.
670 670
671 671 Create a copy of an existing repository in a new directory. The
672 672 source and destination are URLs, as passed to the repository
673 673 function. Returns a pair of repository peers, the source and
674 674 newly created destination.
675 675
676 676 The location of the source is added to the new repository's
677 677 .hg/hgrc file, as the default to be used for future pulls and
678 678 pushes.
679 679
680 680 If an exception is raised, the partly cloned/updated destination
681 681 repository will be deleted.
682 682
683 683 Arguments:
684 684
685 685 source: repository object or URL
686 686
687 687 dest: URL of destination repository to create (defaults to base
688 688 name of source repository)
689 689
690 690 pull: always pull from source repository, even in local case or if the
691 691 server prefers streaming
692 692
693 693 stream: stream raw data uncompressed from repository (fast over
694 694 LAN, slow over WAN)
695 695
696 696 revs: revision to clone up to (implies pull=True)
697 697
698 698 update: update working directory after clone completes, if
699 699 destination is local repository (True means update to default rev,
700 700 anything else is treated as a revision)
701 701
702 702 branch: branches to clone
703 703
704 704 shareopts: dict of options to control auto sharing behavior. The "pool" key
705 705 activates auto sharing mode and defines the directory for stores. The
706 706 "mode" key determines how to construct the directory name of the shared
707 707 repository. "identity" means the name is derived from the node of the first
708 708 changeset in the repository. "remote" means the name is derived from the
709 709 remote's path/URL. Defaults to "identity."
710 710
711 711 storeincludepats and storeexcludepats: sets of file patterns to include and
712 712 exclude in the repository copy, respectively. If not defined, all files
713 713 will be included (a "full" clone). Otherwise a "narrow" clone containing
714 714 only the requested files will be performed. If ``storeincludepats`` is not
715 715 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
716 716 ``path:.``. If both are empty sets, no files will be cloned.
717 717 """
718 718
719 719 if isinstance(source, bytes):
720 720 src_path = urlutil.get_clone_path_obj(ui, source)
721 721 if src_path is None:
722 722 srcpeer = peer(ui, peeropts, b'')
723 723 origsource = source = b''
724 724 branches = (None, branch or [])
725 725 else:
726 726 srcpeer = peer(ui, peeropts, src_path)
727 727 origsource = src_path.rawloc
728 728 branches = (src_path.branch, branch or [])
729 729 source = src_path.loc
730 730 else:
731 731 if hasattr(source, 'peer'):
732 732 srcpeer = source.peer() # in case we were called with a localrepo
733 733 else:
734 734 srcpeer = source
735 735 branches = (None, branch or [])
736 736 # XXX path: simply use the peer `path` object when this become available
737 737 origsource = source = srcpeer.url()
738 738 srclock = destlock = destwlock = cleandir = None
739 739 destpeer = None
740 740 try:
741 741 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
742 742
743 743 if dest is None:
744 744 dest = defaultdest(source)
745 745 if dest:
746 746 ui.status(_(b"destination directory: %s\n") % dest)
747 747 else:
748 748 dest_path = urlutil.get_clone_path_obj(ui, dest)
749 749 if dest_path is not None:
750 750 dest = dest_path.rawloc
751 751 else:
752 752 dest = b''
753 753
754 754 dest = urlutil.urllocalpath(dest)
755 755 source = urlutil.urllocalpath(source)
756 756
757 757 if not dest:
758 758 raise error.InputError(_(b"empty destination path is not valid"))
759 759
760 760 destvfs = vfsmod.vfs(dest, expandpath=True)
761 761 if destvfs.lexists():
762 762 if not destvfs.isdir():
763 763 raise error.InputError(
764 764 _(b"destination '%s' already exists") % dest
765 765 )
766 766 elif destvfs.listdir():
767 767 raise error.InputError(
768 768 _(b"destination '%s' is not empty") % dest
769 769 )
770 770
771 771 createopts = {}
772 772 narrow = False
773 773
774 774 if storeincludepats is not None:
775 775 narrowspec.validatepatterns(storeincludepats)
776 776 narrow = True
777 777
778 778 if storeexcludepats is not None:
779 779 narrowspec.validatepatterns(storeexcludepats)
780 780 narrow = True
781 781
782 782 if narrow:
783 783 # Include everything by default if only exclusion patterns defined.
784 784 if storeexcludepats and not storeincludepats:
785 785 storeincludepats = {b'path:.'}
786 786
787 787 createopts[b'narrowfiles'] = True
788 788
789 789 if depth:
790 790 createopts[b'shallowfilestore'] = True
791 791
792 792 if srcpeer.capable(b'lfs-serve'):
793 793 # Repository creation honors the config if it disabled the extension, so
794 794 # we can't just announce that lfs will be enabled. This check avoids
795 795 # saying that lfs will be enabled, and then saying it's an unknown
796 796 # feature. The lfs creation option is set in either case so that a
797 797 # requirement is added. If the extension is explicitly disabled but the
798 798 # requirement is set, the clone aborts early, before transferring any
799 799 # data.
800 800 createopts[b'lfs'] = True
801 801
802 802 if b'lfs' in extensions.disabled():
803 803 ui.status(
804 804 _(
805 805 b'(remote is using large file support (lfs), but it is '
806 806 b'explicitly disabled in the local configuration)\n'
807 807 )
808 808 )
809 809 else:
810 810 ui.status(
811 811 _(
812 812 b'(remote is using large file support (lfs); lfs will '
813 813 b'be enabled for this repository)\n'
814 814 )
815 815 )
816 816
817 817 shareopts = shareopts or {}
818 818 sharepool = shareopts.get(b'pool')
819 819 sharenamemode = shareopts.get(b'mode')
820 820 if sharepool and islocal(dest):
821 821 sharepath = None
822 822 if sharenamemode == b'identity':
823 823 # Resolve the name from the initial changeset in the remote
824 824 # repository. This returns nullid when the remote is empty. It
825 825 # raises RepoLookupError if revision 0 is filtered or otherwise
826 826 # not available. If we fail to resolve, sharing is not enabled.
827 827 try:
828 828 with srcpeer.commandexecutor() as e:
829 829 rootnode = e.callcommand(
830 830 b'lookup',
831 831 {
832 832 b'key': b'0',
833 833 },
834 834 ).result()
835 835
836 836 if rootnode != sha1nodeconstants.nullid:
837 837 sharepath = os.path.join(sharepool, hex(rootnode))
838 838 else:
839 839 ui.status(
840 840 _(
841 841 b'(not using pooled storage: '
842 842 b'remote appears to be empty)\n'
843 843 )
844 844 )
845 845 except error.RepoLookupError:
846 846 ui.status(
847 847 _(
848 848 b'(not using pooled storage: '
849 849 b'unable to resolve identity of remote)\n'
850 850 )
851 851 )
852 852 elif sharenamemode == b'remote':
853 853 sharepath = os.path.join(
854 854 sharepool, hex(hashutil.sha1(source).digest())
855 855 )
856 856 else:
857 857 raise error.Abort(
858 858 _(b'unknown share naming mode: %s') % sharenamemode
859 859 )
860 860
861 861 # TODO this is a somewhat arbitrary restriction.
862 862 if narrow:
863 863 ui.status(
864 864 _(b'(pooled storage not supported for narrow clones)\n')
865 865 )
866 866 sharepath = None
867 867
868 868 if sharepath:
869 869 return clonewithshare(
870 870 ui,
871 871 peeropts,
872 872 sharepath,
873 873 source,
874 874 srcpeer,
875 875 dest,
876 876 pull=pull,
877 877 rev=revs,
878 878 update=update,
879 879 stream=stream,
880 880 )
881 881
882 882 srcrepo = srcpeer.local()
883 883
884 884 abspath = origsource
885 885 if islocal(origsource):
886 886 abspath = util.abspath(urlutil.urllocalpath(origsource))
887 887
888 888 if islocal(dest):
889 889 if os.path.exists(dest):
890 890 # only clean up directories we create ourselves
891 891 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
892 892 cleandir = hgdir
893 893 else:
894 894 cleandir = dest
895 895
896 896 copy = False
897 897 if (
898 898 srcrepo
899 899 and srcrepo.cancopy()
900 900 and islocal(dest)
901 901 and not phases.hassecret(srcrepo)
902 902 ):
903 903 copy = not pull and not revs
904 904
905 905 # TODO this is a somewhat arbitrary restriction.
906 906 if narrow:
907 907 copy = False
908 908
909 909 if copy:
910 910 try:
911 911 # we use a lock here because if we race with commit, we
912 912 # can end up with extra data in the cloned revlogs that's
913 913 # not pointed to by changesets, thus causing verify to
914 914 # fail
915 915 srclock = srcrepo.lock(wait=False)
916 916 except error.LockError:
917 917 copy = False
918 918
919 919 if copy:
920 920 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
921 921
922 922 destrootpath = urlutil.urllocalpath(dest)
923 923 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
924 924 localrepo.createrepository(
925 925 ui,
926 926 destrootpath,
927 927 requirements=dest_reqs,
928 928 )
929 929 destrepo = localrepo.makelocalrepository(ui, destrootpath)
930 930
931 931 destwlock = destrepo.wlock()
932 932 destlock = destrepo.lock()
933 933 from . import streamclone # avoid cycle
934 934
935 935 streamclone.local_copy(srcrepo, destrepo)
936 936
937 937 # we need to re-init the repo after manually copying the data
938 938 # into it
939 939 destpeer = peer(srcrepo, peeropts, dest)
940 940
941 941 # make the peer aware that is it already locked
942 942 #
943 943 # important:
944 944 #
945 945 # We still need to release that lock at the end of the function
946 946 destpeer.local()._lockref = weakref.ref(destlock)
947 947 destpeer.local()._wlockref = weakref.ref(destwlock)
948 948 # dirstate also needs to be copied because `_wlockref` has a reference
949 949 # to it: this dirstate is saved to disk when the wlock is released
950 950 destpeer.local().dirstate = destrepo.dirstate
951 951
952 952 srcrepo.hook(
953 953 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
954 954 )
955 955 else:
956 956 try:
957 957 # only pass ui when no srcrepo
958 958 destpeer = peer(
959 959 srcrepo or ui,
960 960 peeropts,
961 961 dest,
962 962 create=True,
963 963 createopts=createopts,
964 964 )
965 965 except FileExistsError:
966 966 cleandir = None
967 967 raise error.Abort(_(b"destination '%s' already exists") % dest)
968 968
969 969 if revs:
970 970 if not srcpeer.capable(b'lookup'):
971 971 raise error.Abort(
972 972 _(
973 973 b"src repository does not support "
974 974 b"revision lookup and so doesn't "
975 975 b"support clone by revision"
976 976 )
977 977 )
978 978
979 979 # TODO this is batchable.
980 980 remoterevs = []
981 981 for rev in revs:
982 982 with srcpeer.commandexecutor() as e:
983 983 remoterevs.append(
984 984 e.callcommand(
985 985 b'lookup',
986 986 {
987 987 b'key': rev,
988 988 },
989 989 ).result()
990 990 )
991 991 revs = remoterevs
992 992
993 993 checkout = revs[0]
994 994 else:
995 995 revs = None
996 996 local = destpeer.local()
997 997 if local:
998 998 if narrow:
999 999 with local.wlock(), local.lock(), local.transaction(
1000 1000 b'narrow-clone'
1001 1001 ):
1002 1002 local.setnarrowpats(storeincludepats, storeexcludepats)
1003 1003 narrowspec.copytoworkingcopy(local)
1004 1004
1005 1005 u = urlutil.url(abspath)
1006 1006 defaulturl = bytes(u)
1007 1007 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1008 1008 if not stream:
1009 1009 if pull:
1010 1010 stream = False
1011 1011 else:
1012 1012 stream = None
1013 1013 # internal config: ui.quietbookmarkmove
1014 1014 overrides = {(b'ui', b'quietbookmarkmove'): True}
1015 1015 with local.ui.configoverride(overrides, b'clone'):
1016 1016 exchange.pull(
1017 1017 local,
1018 1018 srcpeer,
1019 1019 heads=revs,
1020 1020 streamclonerequested=stream,
1021 1021 includepats=storeincludepats,
1022 1022 excludepats=storeexcludepats,
1023 1023 depth=depth,
1024 1024 )
1025 1025 elif srcrepo:
1026 1026 # TODO lift restriction once exchange.push() accepts narrow
1027 1027 # push.
1028 1028 if narrow:
1029 1029 raise error.Abort(
1030 1030 _(
1031 1031 b'narrow clone not available for '
1032 1032 b'remote destinations'
1033 1033 )
1034 1034 )
1035 1035
1036 1036 exchange.push(
1037 1037 srcrepo,
1038 1038 destpeer,
1039 1039 revs=revs,
1040 1040 bookmarks=srcrepo._bookmarks.keys(),
1041 1041 )
1042 1042 else:
1043 1043 raise error.Abort(
1044 1044 _(b"clone from remote to remote not supported")
1045 1045 )
1046 1046
1047 1047 cleandir = None
1048 1048
1049 1049 destrepo = destpeer.local()
1050 1050 if destrepo:
1051 1051 template = uimod.samplehgrcs[b'cloned']
1052 1052 u = urlutil.url(abspath)
1053 1053 u.passwd = None
1054 1054 defaulturl = bytes(u)
1055 1055 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1056 1056 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1057 1057
1058 1058 if ui.configbool(b'experimental', b'remotenames'):
1059 1059 logexchange.pullremotenames(destrepo, srcpeer)
1060 1060
1061 1061 if update:
1062 1062 if update is not True:
1063 1063 with srcpeer.commandexecutor() as e:
1064 1064 checkout = e.callcommand(
1065 1065 b'lookup',
1066 1066 {
1067 1067 b'key': update,
1068 1068 },
1069 1069 ).result()
1070 1070
1071 1071 uprev = None
1072 1072 status = None
1073 1073 if checkout is not None:
1074 1074 # Some extensions (at least hg-git and hg-subversion) have
1075 1075 # a peer.lookup() implementation that returns a name instead
1076 1076 # of a nodeid. We work around it here until we've figured
1077 1077 # out a better solution.
1078 1078 if len(checkout) == 20 and checkout in destrepo:
1079 1079 uprev = checkout
1080 1080 elif scmutil.isrevsymbol(destrepo, checkout):
1081 1081 uprev = scmutil.revsymbol(destrepo, checkout).node()
1082 1082 else:
1083 1083 if update is not True:
1084 1084 try:
1085 1085 uprev = destrepo.lookup(update)
1086 1086 except error.RepoLookupError:
1087 1087 pass
1088 1088 if uprev is None:
1089 1089 try:
1090 1090 if destrepo._activebookmark:
1091 1091 uprev = destrepo.lookup(destrepo._activebookmark)
1092 1092 update = destrepo._activebookmark
1093 1093 else:
1094 1094 uprev = destrepo._bookmarks[b'@']
1095 1095 update = b'@'
1096 1096 bn = destrepo[uprev].branch()
1097 1097 if bn == b'default':
1098 1098 status = _(b"updating to bookmark %s\n" % update)
1099 1099 else:
1100 1100 status = (
1101 1101 _(b"updating to bookmark %s on branch %s\n")
1102 1102 ) % (update, bn)
1103 1103 except KeyError:
1104 1104 try:
1105 1105 uprev = destrepo.branchtip(b'default')
1106 1106 except error.RepoLookupError:
1107 1107 uprev = destrepo.lookup(b'tip')
1108 1108 if not status:
1109 1109 bn = destrepo[uprev].branch()
1110 1110 status = _(b"updating to branch %s\n") % bn
1111 1111 destrepo.ui.status(status)
1112 1112 _update(destrepo, uprev)
1113 1113 if update in destrepo._bookmarks:
1114 1114 bookmarks.activate(destrepo, update)
1115 1115 if destlock is not None:
1116 1116 release(destlock)
1117 1117 if destwlock is not None:
1118 1118 release(destlock)
1119 1119 # here is a tiny windows were someone could end up writing the
1120 1120 # repository before the cache are sure to be warm. This is "fine"
1121 1121 # as the only "bad" outcome would be some slowness. That potential
1122 1122 # slowness already affect reader.
1123 1123 with destrepo.lock():
1124 1124 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1125 1125 finally:
1126 1126 release(srclock, destlock, destwlock)
1127 1127 if cleandir is not None:
1128 1128 shutil.rmtree(cleandir, True)
1129 1129 if srcpeer is not None:
1130 1130 srcpeer.close()
1131 1131 if destpeer and destpeer.local() is None:
1132 1132 destpeer.close()
1133 1133 return srcpeer, destpeer
1134 1134
1135 1135
def _showstats(repo, stats, quietempty=False):
    """Print the update/merge statistics in ``stats`` to the repo ui.

    When ``quietempty`` is true and nothing changed, print nothing.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1151 1151
1152 1152
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Deprecated: update the working directory to ``node``.

    Working-directory changes are merged into the destination unless
    ``overwrite`` is set, in which case they are discarded.

    returns stats (see pydoc mercurial.merge.applyupdates)
    """
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        updatecheck=updatecheck,
        labels=labels,
    )
1171 1171
1172 1172
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node`` and report statistics.

    Returns True when unresolved file merges remain.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved


# clone() uses a local name ``update``; keep a private alias for it
_update = update
1184 1184
1185 1185
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, clobbering changes.

    Always returns False: a clean update cannot leave conflicts behind.
    """
    stats = mergemod.clean_update(repo[node])
    # clean updates discard local changes, so no merge conflicts can occur
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty=quietempty)
    return False


# updatetotally() uses a local name ``clean``; keep a private alias for it
_clean = clean
1197 1197
# The complete set of dirty-working-directory policies accepted by
# updatetotally(); values are the merge module's UPDATECHECK_* constants.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1204 1204
1205 1205
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # an explicitly passed-in value, on the other hand, must be valid
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    # bookmark and dirstate manipulation below must happen under wlock
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and tell us
            # which bookmark, if any, should move along)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out up front; the actual update then needs no check
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            # update succeeded: advance the active bookmark if possible
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination was named by a bookmark: make it the active one
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination named by something else: drop bookmark activation
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1284 1284
1285 1285
def merge(ctx, force=False, remind=True, labels=None):
    """Branch merge with ``ctx``, resolving changes.

    Return True if any unresolved conflicts remain.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    conflicts = stats.unresolvedcount > 0
    if conflicts:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return conflicts
1307 1307
1308 1308
def abortmerge(ui, repo):
    """Abort an in-progress merge, cleanly updating back to the local side."""
    ms = mergestatemod.mergestate.read(repo)
    # with conflicts the merge state records the local changeset; without
    # conflicts no merge state was stored, so use the dirstate parent
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1322 1322
1323 1323
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    ``subreporecurse`` is a no-argument callable whose return value becomes
    the exit code when no changes are found.  ``subpath``, when given, is a
    path (absolute URL or path relative to ``source``) to compare with
    instead of ``source`` itself.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            peer_path = url = bytes(subpath)
        else:
            # relative subpath: join it onto the source location, using the
            # path-separator rules of the source (local fs vs. URL)
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    # getremotechanges() below may replace both the peer and the cleanup
    # callable; keep the current one so the finally block always releases
    # whatever is live at that point
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1390 1390
1391 1391
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets present in ``source`` but not in ``repo``.

    Returns 0 when incoming changes were found, 1 otherwise (possibly
    lowered by subrepo recursion).
    """

    def subreporecurse():
        # recurse into subrepositories when requested, keeping the best
        # (lowest) exit status seen
        ret = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for spath in sorted(wctx.substate):
                ret = min(ret, wctx.sub(spath).incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skip_merges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if skip_merges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1421 1421
1422 1422
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Find changesets missing from every push destination in ``dests``.

    Returns ``(outgoing_revs, others)`` where ``outgoing_revs`` is the union
    of missing changesets across all destinations, sorted by revision number,
    and ``others`` is the list of still-open peers (one per destination) the
    caller must close.

    Side effect: ``repo._subtoppath`` is set to each destination in turn (and
    left at the last one) so subrepositories can derive their own push path;
    outgoing() is responsible for deleting it.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        # propagate the (possibly non-default) destination so subrepos
        # append their relative path to it rather than to "default"
        repo._subtoppath = dest
        if subpath is not None:
            # NOTE: keep the parsed URL in a local instead of rebinding the
            # ``subpath`` parameter -- rebinding broke the second iteration
            # when several destinations are given (urlutil.url() would then
            # be fed a url object instead of bytes).
            sub_url = urlutil.url(subpath)
            if sub_url.isabs():
                dest = bytes(sub_url)
            else:
                # relative subpath: join onto the destination, honoring
                # local-filesystem vs. URL separator semantics
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, sub_url))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1465 1466
1466 1467
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on each subrepo when --subrepos is set; return status."""
    ret = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for spath in sorted(wctx.substate):
            ret = min(ret, wctx.sub(spath).outgoing(ui, dests, opts))
    return ret
1475 1476
1476 1477
def _outgoing_filter(repo, revs, opts):
    """Apply the --limit/--no-merges/--newest-first options to ``revs``.

    Yields nodes from ``revs`` (which is reversed in place when
    --newest-first is given).
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # fast path: nothing to filter out
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1498 1499
1499 1500
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets in ``repo`` not found in the destination(s).

    Returns 0 when outgoing changes were found, 1 otherwise (possibly
    lowered by subrepo recursion).
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # hooks and subrepo recursion run even when nothing was found
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        for oth in others:
            oth.close()
        # _outgoing() set repo._subtoppath for subrepo path propagation;
        # drop it now that the recursion (if any) is finished
        del repo._subtoppath
1531 1533
1532 1534
def verify(repo, level=None):
    """Verify the consistency of a repository, including subrepo links.

    Returns a nonzero status when any problem is found.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    substate = util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    revs = repo.revs(b"filelog(%s)", substate)

    if not revs:
        return ret

    repo.ui.status(_(b'checking subrepo links\n'))
    for rev in revs:
        ctx = repo[rev]
        try:
            for subpath in ctx.substate:
                try:
                    sub = ctx.sub(subpath, allowcreate=False)
                    # keep any nonzero status from earlier checks
                    ret = sub.verify() or ret
                except error.RepoError as e:
                    repo.ui.warn(b'%d: %s\n' % (rev, e))
        except Exception:
            repo.ui.warn(
                _(b'.hgsubstate is corrupt in revision %s\n')
                % short(ctx.node())
            )

    return ret
1565 1567
1566 1568
def remoteui(src, opts):
    """Build a ui suitable for remote interaction from a ui-or-repo + opts."""
    if hasattr(src, 'baseui'):
        # looks like a repository: start from its base ui (dropping
        # repo-specific config) and read overrides from the repo-level ui
        dst = src.baseui.copy()
        src = src.ui
    else:
        # assume it's a global ui object; keep all global options
        dst = src.copy()

    # ssh-specific options: command-line values win over configuration
    for opt in (b'ssh', b'remotecmd'):
        val = opts.get(opt) or src.config(b'ui', opt)
        if val:
            dst.setconfig(b"ui", opt, val, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1595 1597
1596 1598
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (name of the repo attribute holding the base directory,
# file name relative to that directory).
foi = [
    ('spath', b'00changelog.i'),
    ('spath', b'phaseroots'),  # ! phase can change content at the same size
    ('spath', b'obsstore'),
    ('path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1606 1608
1607 1609
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.
        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ((mtime, size) for each file of interest, newest mtime)."""
        state = []
        maxmtime = -1
        for attrname, fname in foi:
            prefix = getattr(self._repo, attrname)
            try:
                st = os.stat(os.path.join(prefix, fname))
            except OSError:
                # the file may not exist yet; fall back to its directory
                st = os.stat(prefix)
            mtime = st[stat.ST_MTIME]
            state.append((mtime, st.st_size))
            maxmtime = max(maxmtime, mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should
        be completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        dup = cachedlocalrepo(repo)
        dup._state = self._state
        dup.mtime = self.mtime
        return dup
General Comments 0
You need to be logged in to leave comments. Login now