##// END OF EJS Templates
clone: use `get_clone_path`...
marmoute -
r47713:338ab1d8 default
parent child Browse files
Show More
@@ -1,1597 +1,1597 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import shutil
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 nullhex,
20 20 nullid,
21 21 short,
22 22 )
23 23 from .pycompat import getattr
24 24
25 25 from . import (
26 26 bookmarks,
27 27 bundlerepo,
28 28 cacheutil,
29 29 cmdutil,
30 30 destutil,
31 31 discovery,
32 32 error,
33 33 exchange,
34 34 extensions,
35 35 graphmod,
36 36 httppeer,
37 37 localrepo,
38 38 lock,
39 39 logcmdutil,
40 40 logexchange,
41 41 merge as mergemod,
42 42 mergestate as mergestatemod,
43 43 narrowspec,
44 44 phases,
45 45 requirements,
46 46 scmutil,
47 47 sshpeer,
48 48 statichttprepo,
49 49 ui as uimod,
50 50 unionrepo,
51 51 url,
52 52 util,
53 53 verify as verifymod,
54 54 vfs as vfsmod,
55 55 )
56 56 from .utils import (
57 57 hashutil,
58 58 stringutil,
59 59 urlutil,
60 60 )
61 61
62 62
# convenience alias so callers can release one or more locks in one call
release = lock.release

# shared features
# name of the bookmarks feature shared between a share source and its shares
sharedbookmarks = b'bookmarks'
67 67
68 68
def _local(path):
    """Return the repo module for a local path: bundlerepo when the path
    is a regular file (a bundle), localrepo otherwise."""
    expanded = util.expandpath(urlutil.urllocalpath(path))

    regular_file = False
    try:
        # os.stat() instead of os.path.isfile(): since Python 3.8 the
        # latter returns False on invalid-path exceptions, and we want to
        # report invalid paths explicitly below.
        mode = os.stat(expanded).st_mode
        regular_file = stat.S_ISREG(mode)
    except (TypeError, ValueError) as exc:
        # Python 2 raises TypeError, Python 3 ValueError.
        raise error.Abort(
            _(b'invalid path %s: %s')
            % (expanded, stringutil.forcebytestr(exc))
        )
    except OSError:
        regular_file = False

    if regular_file:
        return bundlerepo
    return localrepo
88 88
89 89
def addbranchrevs(lrepo, other, branches, revs):
    """Expand branch names into revisions for a remote operation.

    ``branches`` is the ``(hashbranch, branches)`` pair produced by
    urlutil.parseurl(). Returns ``(revs, checkout)``: the revisions to
    operate on and the revision to check out afterwards (both may be
    None when nothing was requested).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    # No branch requested: pass the requested revs through unchanged.
    if not hashbranch and not branches:
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    # Old servers without the branchmap capability: we cannot expand
    # branch names, so treat hashbranch as a plain revision.
    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # Expand a branch name into its heads (newest first); b'.' means
        # the local dirstate branch. Returns False if the branch is
        # unknown to the remote.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # hashbranch may actually be a revision; fall back to treating it
        # as one when it is not a known branch.
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
132 132
133 133
def parseurl(path, branches=None):
    """parse url#branch, returning (url, (branch, branches))

    Deprecated compatibility shim; the implementation now lives in
    mercurial.utils.urlutil.
    """
    util.nouideprecwarn(
        b'parseurl(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.parseurl(path, branches=branches)
139 139
140 140
# Map from URL scheme to the module (or callable returning a module) used
# to instantiate repositories/peers for that scheme. b'file' doubles as
# the fallback for unrecognized schemes (see _peerlookup below).
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
150 150
151 151
def _peerlookup(path):
    """Return the module (or factory result) handling ``path``.

    The URL scheme selects an entry from ``schemes``; unknown schemes
    fall back to local file handling. A callable entry (like ``_local``)
    is invoked with the path to pick the concrete module.
    """
    u = urlutil.url(path)
    scheme = u.scheme or b'file'
    thing = schemes.get(scheme) or schemes[b'file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, b'instance'):
            raise
        return thing
164 164
165 165
def islocal(repo):
    """return true if repo (or path pointing to repo) is local"""
    if not isinstance(repo, bytes):
        # already a repo/peer object: ask it directly
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # the handler for this path has no notion of locality
        return False
174 174
175 175
def openpath(ui, path, sendaccept=True):
    """open path with open if local, url.open if remote"""
    parsed = urlutil.url(path, parsequery=False, parsefragment=False)
    if not parsed.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(parsed.localpath(), b'rb')
183 183
184 184
# a list of (ui, repo) functions called for wire peer initialization
# (run by _peerorrepo on objects that are not local repositories)
wirepeersetupfuncs = []
187 187
188 188
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    # Delegate construction to the scheme-specific module for this path.
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # Prefer the object's own ui when it has one.
    ui = getattr(obj, "ui", ui)
    # presetup functions run before any extension reposetup hooks.
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b' > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    # Wire peers additionally get the functions registered in
    # wirepeersetupfuncs.
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
215 215
216 216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = obj.local()
    if repo:
        # always hand back the "visible" filtered view of the repo
        return repo.filtered(b'visible')
    raise error.Abort(
        _(b"repository '%s' is not local") % (path or obj.url())
    )
240 240
241 241
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    """return a repository peer for the specified path"""
    remote_ui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        remote_ui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
248 248
249 249
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    urlpath = urlutil.url(source).path
    if urlpath:
        # strip trailing separators, then take the last path component
        return os.path.basename(os.path.normpath(urlpath))
    return b''
270 270
271 271
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share at all
        return None

    cached = util.safehasattr(repo, b'srcrepo') and repo.srcrepo
    if cached:
        return cached

    # the sharedpath always ends in the .hg; we want the path to the repo
    sourcepath = repo.vfs.split(repo.sharedpath)[0]
    srcurl, _branches = urlutil.parseurl(sourcepath)
    src = repository(repo.ui, srcurl)
    repo.srcrepo = src  # cache for subsequent calls
    return src
289 289
290 290
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` is a local path/URL (bytes) or an existing repo/peer
    object; it must be local. ``update`` may be a boolean or a revision
    to check out. ``bookmarks`` adds the bookmarks feature to the set of
    shared items. ``relative`` is forwarded as the 'sharedrelative'
    creation option.
    '''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        # source given as a path: resolve URL, branches and checkout rev
        origsource = ui.expandpath(source)
        source, branches = urlutil.parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen the destination so it picks up the now-shared state
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
338 338
339 339
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # the share source has no config; nothing to prepend
        return

    existing = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(existing)
360 360
361 361
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # retire the sharedpath pointer; renamed to .old rather than
            # deleted outright
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            # drop the share-related requirements and persist them
            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # the old repo object must not be used any more; make misuse fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
408 408
409 409
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    defaulturl = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if defaulturl:
        hgrc = b'[paths]\ndefault = %s\n' % defaulturl
        destrepo.vfs.write(b'hgrc', util.tonativeeol(hgrc))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
426 426
427 427
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the requested checkout first, then fall back to default, then tip
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
448 448
449 449
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # hardlink stays None until the first util.copyfiles() call
        # decides whether hardlinking works; note the progress topic is
        # therefore always b'copying' at this point.
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # skip phase data when the source is publishing
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # don't leak the destination lock when the copy fails midway
        release(destlock)
        raise
488 488
489 489
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    # Resolve symbolic revisions on the source into nodeids first.
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
596 596
597 597
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    source = srcrepo.cachevfs.join(fname)
    if not os.path.exists(source):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(source, os.path.join(dstcachedir, fname))
607 607
608 608
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            # use get_clone_path() (not ui.expandpath()) so the destination
            # is resolved the same way as the source above
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # translate first, then interpolate: formatting
                            # inside _() defeats the gettext catalog lookup
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            # here is a tiny window where someone could end up writing to the
            # repository before the caches are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affects readers.
            with destrepo.lock():
                destrepo.updatecaches(full=True)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1067 1067
1068 1068
def _showstats(repo, stats, quietempty=False):
    """Print a one-line update/merge summary for *stats* on repo.ui.

    When ``quietempty`` is true and the stats report no activity at all,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1084 1084
1085 1085
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated entry point kept as a thin shim over merge._update().
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    merge_labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        updatecheck=updatecheck,
        labels=merge_labels,
    )
1104 1104
1105 1105
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    # Perform the actual update, then report the resulting statistics.
    result = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, result, quietempty)
    unresolved = result.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    # True when the update left unresolved conflicts behind.
    return unresolved > 0
1113 1113
1114 1114
# `clone()` has a local variable named `update`, so keep an alias to the
# module-level update() function under a non-conflicting name.
_update = update
1117 1117
1118 1118
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    result = mergemod.clean_update(repo[node])
    # A clean (forced) update can never leave conflicts behind.
    assert result.unresolvedcount == 0
    if show_stats:
        _showstats(repo, result, quietempty)
    # Mirror update()'s "has conflicts" return value; always False here.
    return False
1126 1126
1127 1127
# `updatetotally()` has a local variable named `clean`, so keep an alias to
# the module-level clean() function under a non-conflicting name.
_clean = clean
1130 1130
# The set of values accepted for the `commands.update.check` config knob and
# for the `updatecheck` argument of updatetotally() below.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1137 1137
1138 1138
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicitly passed updatecheck must be one of the known values
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # a bookmark to move and/or activate afterwards)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out early on a dirty working directory, then run the
                # actual update without further dirtiness checking
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination was a plain revision/name: deactivate any bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)

        if warndest:
            # mention other interesting update destinations, if any
            destutil.statusotherdests(ui, repo)

    return ret
1217 1217
1218 1218
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    result = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, result)
    unresolved = result.unresolvedcount
    if unresolved:
        # Point the user at the ways out of a conflicted merge.
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved > 0
1240 1240
1241 1241
def abortmerge(ui, repo):
    """Abort an in-progress merge, cleanly updating back to its local side."""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # Conflicts were recorded: go back to the local side of the merge.
        node = ms.localctx.hex()
    else:
        # No conflicts, so no mergestate was stored; '.' is the target.
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    result = mergemod.clean_update(repo[node])
    # A forced clean update cannot leave unresolved files behind.
    assert result.unresolvedcount == 0
    _showstats(repo, result)
1255 1255
1256 1256
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the result of
    subreporecurse().
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
    srcs = list(srcs)
    if len(srcs) != 1:
        # This codebase deals in bytes strings throughout; the message must
        # be a bytes literal as well or formatting/printing it will hit a
        # str vs bytes mismatch.
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    source, branches = srcs[0]
    if subpath is not None:
        # resolve the subrepository path relative to the main source
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    # cleanupfn may be replaced below when a bundle repo takes ownership of
    # the peer; the finally block always calls the current one.
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1313 1313
1314 1314
def incoming(ui, repo, source, opts, subpath=None):
    """Show new changesets found in the given source.

    Returns 0 when incoming changes were found, 1 otherwise (combining
    the result of _incoming() with the subrepo recursion).
    """

    def subreporecurse():
        # Recurse into subrepositories when --subrepos was given; the
        # overall exit code becomes 0 as soon as any repo has changes.
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # Honor --limit, --newest-first and --no-merges while showing the
        # incoming changesets.
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1342 1342
1343 1343
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the changesets missing from each of the push destinations.

    Returns a pair ``(outgoing_revs, others)``: the union of missing nodes
    across all destinations, sorted by local revision number, and the list
    of still-open peers. Callers are responsible for closing those peers.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # NOTE(review): subpath is rebound to a url object here, so on a
            # second loop iteration urlutil.url() receives a url instance
            # rather than bytes -- confirm this is handled correctly.
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                p.path = os.path.normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            # resolve the requested revisions to nodes before discovery
            revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # the peer stays open; ownership passes to the caller via others
            others.append(other)
        except: # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1382 1382
1383 1383
def _outgoing_recurse(ui, repo, dests, opts):
    """Recurse into subrepos for `hg outgoing`; return the min exit code."""
    ret = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for subpath in sorted(wctx.substate):
            subrepo = wctx.sub(subpath)
            ret = min(ret, subrepo.outgoing(ui, dests, opts))
    return ret
1392 1392
1393 1393
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    skip_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not skip_merges:
        # Fast path: nothing to filter, stream the nodes through as-is.
        for node in revs:
            yield node
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        if skip_merges:
            # a node with two non-null parents is a merge; skip it
            parents = [p for p in cl.parents(node) if p != nullid]
            if len(parents) == 2:
                continue
        emitted += 1
        yield node
1415 1415
1416 1416
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the specified destinations.

    Returns 0 when outgoing changes exist, 1 otherwise (combined with the
    subrepo recursion result).
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                # graph output: render the DAG with ASCII edges
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # hooks run even when no changes were found
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # always close the peers opened by _outgoing()
        for oth in others:
            oth.close()
1448 1448
1449 1449
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies the subrepositories referenced from .hgsubstate in
    visible changesets. Returns the result of verifymod.verify(),
    combined with the subrepo verification results.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # any nonzero subrepo result sticks in ret
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading .hgsubstate itself failed; warn and keep going
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1482 1482
1483 1483
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # src looks like a repository: start from its base (global) config
        # and read override values from the repo-level ui below.
        remote_ui = src.baseui.copy()
        src = src.ui
    else:
        # src is already a ui object: keep all of its global options.
        remote_ui = src.copy()

    # ssh-specific options: command-line values win over the config.
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            remote_ui.setconfig(b'ui', opt, value, b'copied')

    # copy bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        remote_ui.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # copy selected local settings to the remote ui
    local_sections = (
        b'auth',
        b'hostfingerprints',
        b'hostsecurity',
        b'http_proxy',
    )
    for section in local_sections:
        for key, val in src.configitems(section):
            remote_ui.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        remote_ui.setconfig(
            b'web', b'cacerts', util.expandpath(cacerts), b'copied'
        )

    return remote_ui
1512 1512
1513 1513
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is an (attr, filename) pair where `attr` names the
# repo attribute holding the directory that contains `filename`.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1523 1523
1524 1524
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) for the files-of-interest, and the most
        # recent mtime seen; used by fetch() to detect staleness
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # stale: open a fresh repository and re-apply the original filter
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ((mtime, size) tuples for the files of interest, max mtime)."""
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file missing; fall back to the containing directory so its
                # mtime/size still participate in the state snapshot
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the staleness snapshot so the copy refreshes consistently
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now