##// END OF EJS Templates
share: stop using 'islocal' with repo instance...
marmoute -
r50581:229e0ed8 default
parent child Browse files
Show More
@@ -1,1611 +1,1616 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import os
11 11 import posixpath
12 12 import shutil
13 13 import stat
14 14 import weakref
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 sha1nodeconstants,
20 20 short,
21 21 )
22 22 from .pycompat import getattr
23 23
24 24 from . import (
25 25 bookmarks,
26 26 bundlerepo,
27 27 cmdutil,
28 28 destutil,
29 29 discovery,
30 30 error,
31 31 exchange,
32 32 extensions,
33 33 graphmod,
34 34 httppeer,
35 35 localrepo,
36 36 lock,
37 37 logcmdutil,
38 38 logexchange,
39 39 merge as mergemod,
40 40 mergestate as mergestatemod,
41 41 narrowspec,
42 42 phases,
43 43 requirements,
44 44 scmutil,
45 45 sshpeer,
46 46 statichttprepo,
47 47 ui as uimod,
48 48 unionrepo,
49 49 url,
50 50 util,
51 51 verify as verifymod,
52 52 vfs as vfsmod,
53 53 )
54 54 from .interfaces import repository as repositorymod
55 55 from .utils import (
56 56 hashutil,
57 57 stringutil,
58 58 urlutil,
59 59 )
60 60
61 61
# convenience alias: release one or more locks in a single call
release = lock.release

# shared features
sharedbookmarks = b'bookmarks'
66 66
67 67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names into revisions to fetch and a checkout target.

    ``branches`` is a ``(hashbranch, branches)`` pair: the branch named
    after ``#`` in a URL (if any) and a list of branch names.

    Returns a ``(revs, checkout)`` pair: the possibly extended list of
    revisions and the revision the working copy should be updated to
    (``None`` when nothing was requested).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related requested: pass the revs through untouched
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # old server without branchmap support: we can only forward the
        # ``#branch`` fragment verbatim and hope the server resolves it
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # expand the branch into its heads (newest first); returns whether
        # the branch exists in the remote branchmap
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # an unknown ``#branch`` fragment may still be a valid revision:
        # forward it as-is instead of failing
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
110 110
111 111
def _isfile(path):
    """Return True if ``path`` points at a regular file.

    We use os.stat() directly instead of os.path.isfile() because the
    latter started returning ``False`` on invalid-path exceptions in
    Python 3.8, and invalid paths must be reported to the user here.
    """
    try:
        st = os.stat(path)
    except ValueError as e:
        msg = stringutil.forcebytestr(e)
        raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
    except OSError:
        return False
    return stat.S_ISREG(st.st_mode)
126 126
127 127
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a plain file on disk is a bundle, hence not a local repository
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        # bundle files get a bundlerepo; directories get a localrepo
        expanded = util.expandpath(urlutil.urllocalpath(path))
        factory = bundlerepo if _isfile(expanded) else localrepo
        return factory.instance(ui, expanded, *args, **kwargs)
144 144
145 145
# map URL scheme -> handler (module or factory class) providing an
# ``instance()`` constructor (and optionally ``islocal()``) for
# repositories/peers reached through that scheme
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
155 155
156 156
def _peerlookup(path):
    """Return the scheme handler for ``path``.

    Unknown or missing schemes fall back to the local ``file`` handler.
    """
    parsed = urlutil.url(path)
    handler = schemes.get(parsed.scheme or b'file')
    return handler if handler else schemes[b'file']
162 162
163 163
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # already a repo/peer object: ask it directly
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # the scheme handler has no islocal(): treat as remote
        return False
172 172
173 173
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
181 181
182 182
# a list of (ui, repo) functions called for wire peer initialization
# (run on every non-local object by _setup_repo_or_peer)
wirepeersetupfuncs = []
185 185
186 186
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    # pick the handler matching the URL scheme and build the repo/peer
    cls = _peerlookup(path)
    obj = cls.instance(ui, path, create, intents=intents, createopts=createopts)
    # run caller presetup functions and extension reposetup hooks
    _setup_repo_or_peer(ui, obj, presetupfuncs)
    return obj
195 195
196 196
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run presetup callbacks and extension ``reposetup`` hooks on ``obj``.

    ``obj`` may be a local repository or a peer; non-local objects
    additionally go through ``wirepeersetupfuncs``.
    """
    # prefer the object's own ui (it may carry repo-specific config)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                # time each extension's hook individually for ui logging
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
215 215
216 216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    # note: name the result peer_obj to avoid shadowing the module-level
    # peer() function
    peer_obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = peer_obj.local()
    if not repo:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or peer_obj.url())
        )
    return repo.filtered(b'visible')
240 240
241 241
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
248 248
249 249
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    # use the last path component of the source URL as the destination name
    path = urlutil.url(source).path
    return os.path.basename(os.path.normpath(path)) if path else b''
270 270
271 271
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    # a non-shared repo has sharedpath == path
    if repo.sharedpath == repo.path:
        return None

    # reuse the cached source repo if we already resolved it once
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    # cache on the instance so subsequent calls skip the re-open
    repo.srcrepo = srcrepo
    return srcrepo
289 289
290 290
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` is either a local path (bytes) or a repo/peer object.
    Creates a new repository at ``dest`` whose store is shared with
    ``source``, optionally sharing bookmarks and updating the working
    copy. Returns the new (re-opened) shared repository object.

    Aborts if ``source`` is not a local repository.
    '''

    # NOTE: the scraped diff contained both the old islocal()-only check
    # and its replacement; only the replacement belongs in the function.
    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        # repo/peer object: local() returns None for remote peers
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the freshly written share requirements/config take effect
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
337 342
338 343
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was performed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    # nothing to copy if the share source has no config file
    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    # source config goes first, followed by the pre-existing local config
    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
359 364
360 365
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        # copystore() may or may not have taken a destination lock
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old sharedpath pointer around as ``.old`` rather
            # than deleting it outright
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
407 412
408 413
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # seed the share's hgrc with a default path pointing at the source
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT not in sourcerepo.requirements:
        return
    with destrepo.wlock():
        narrowspec.copytoworkingcopy(destrepo)
425 430
426 431
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the explicit checkout first, then fall back to default, then tip
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
447 452
448 453
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # hardlink starts as None (falsy): util.copyfiles decides on the
        # first copy whether hardlinking works and we reuse its answer
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # a publishing repo needs no phase data: everything is public
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        # caller is responsible for releasing the returned lock
        return destlock
    except:  # re-raises
        release(destlock)
        raise
487 492
488 493
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns a ``(srcpeer, destpeer)`` pair like ``clone()``.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # resolve the symbolic revisions to nodes on the remote
        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
594 599
595 600
596 601 # Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src = srcrepo.cachevfs.join(fname)
    if not os.path.exists(src):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
605 610
606 611
607 612 def clone(
608 613 ui,
609 614 peeropts,
610 615 source,
611 616 dest=None,
612 617 pull=False,
613 618 revs=None,
614 619 update=True,
615 620 stream=False,
616 621 branch=None,
617 622 shareopts=None,
618 623 storeincludepats=None,
619 624 storeexcludepats=None,
620 625 depth=None,
621 626 ):
622 627 """Make a copy of an existing repository.
623 628
624 629 Create a copy of an existing repository in a new directory. The
625 630 source and destination are URLs, as passed to the repository
626 631 function. Returns a pair of repository peers, the source and
627 632 newly created destination.
628 633
629 634 The location of the source is added to the new repository's
630 635 .hg/hgrc file, as the default to be used for future pulls and
631 636 pushes.
632 637
633 638 If an exception is raised, the partly cloned/updated destination
634 639 repository will be deleted.
635 640
636 641 Arguments:
637 642
638 643 source: repository object or URL
639 644
640 645 dest: URL of destination repository to create (defaults to base
641 646 name of source repository)
642 647
643 648 pull: always pull from source repository, even in local case or if the
644 649 server prefers streaming
645 650
646 651 stream: stream raw data uncompressed from repository (fast over
647 652 LAN, slow over WAN)
648 653
649 654 revs: revision to clone up to (implies pull=True)
650 655
651 656 update: update working directory after clone completes, if
652 657 destination is local repository (True means update to default rev,
653 658 anything else is treated as a revision)
654 659
655 660 branch: branches to clone
656 661
657 662 shareopts: dict of options to control auto sharing behavior. The "pool" key
658 663 activates auto sharing mode and defines the directory for stores. The
659 664 "mode" key determines how to construct the directory name of the shared
660 665 repository. "identity" means the name is derived from the node of the first
661 666 changeset in the repository. "remote" means the name is derived from the
662 667 remote's path/URL. Defaults to "identity."
663 668
664 669 storeincludepats and storeexcludepats: sets of file patterns to include and
665 670 exclude in the repository copy, respectively. If not defined, all files
666 671 will be included (a "full" clone). Otherwise a "narrow" clone containing
667 672 only the requested files will be performed. If ``storeincludepats`` is not
668 673 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
669 674 ``path:.``. If both are empty sets, no files will be cloned.
670 675 """
671 676
672 677 if isinstance(source, bytes):
673 678 src = urlutil.get_clone_path(ui, source, branch)
674 679 origsource, source, branches = src
675 680 srcpeer = peer(ui, peeropts, source)
676 681 else:
677 682 srcpeer = source.peer() # in case we were called with a localrepo
678 683 branches = (None, branch or [])
679 684 origsource = source = srcpeer.url()
680 685 srclock = destlock = destwlock = cleandir = None
681 686 destpeer = None
682 687 try:
683 688 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
684 689
685 690 if dest is None:
686 691 dest = defaultdest(source)
687 692 if dest:
688 693 ui.status(_(b"destination directory: %s\n") % dest)
689 694 else:
690 695 dest = urlutil.get_clone_path(ui, dest)[0]
691 696
692 697 dest = urlutil.urllocalpath(dest)
693 698 source = urlutil.urllocalpath(source)
694 699
695 700 if not dest:
696 701 raise error.InputError(_(b"empty destination path is not valid"))
697 702
698 703 destvfs = vfsmod.vfs(dest, expandpath=True)
699 704 if destvfs.lexists():
700 705 if not destvfs.isdir():
701 706 raise error.InputError(
702 707 _(b"destination '%s' already exists") % dest
703 708 )
704 709 elif destvfs.listdir():
705 710 raise error.InputError(
706 711 _(b"destination '%s' is not empty") % dest
707 712 )
708 713
709 714 createopts = {}
710 715 narrow = False
711 716
712 717 if storeincludepats is not None:
713 718 narrowspec.validatepatterns(storeincludepats)
714 719 narrow = True
715 720
716 721 if storeexcludepats is not None:
717 722 narrowspec.validatepatterns(storeexcludepats)
718 723 narrow = True
719 724
720 725 if narrow:
721 726 # Include everything by default if only exclusion patterns defined.
722 727 if storeexcludepats and not storeincludepats:
723 728 storeincludepats = {b'path:.'}
724 729
725 730 createopts[b'narrowfiles'] = True
726 731
727 732 if depth:
728 733 createopts[b'shallowfilestore'] = True
729 734
730 735 if srcpeer.capable(b'lfs-serve'):
731 736 # Repository creation honors the config if it disabled the extension, so
732 737 # we can't just announce that lfs will be enabled. This check avoids
733 738 # saying that lfs will be enabled, and then saying it's an unknown
734 739 # feature. The lfs creation option is set in either case so that a
735 740 # requirement is added. If the extension is explicitly disabled but the
736 741 # requirement is set, the clone aborts early, before transferring any
737 742 # data.
738 743 createopts[b'lfs'] = True
739 744
740 745 if extensions.disabled_help(b'lfs'):
741 746 ui.status(
742 747 _(
743 748 b'(remote is using large file support (lfs), but it is '
744 749 b'explicitly disabled in the local configuration)\n'
745 750 )
746 751 )
747 752 else:
748 753 ui.status(
749 754 _(
750 755 b'(remote is using large file support (lfs); lfs will '
751 756 b'be enabled for this repository)\n'
752 757 )
753 758 )
754 759
755 760 shareopts = shareopts or {}
756 761 sharepool = shareopts.get(b'pool')
757 762 sharenamemode = shareopts.get(b'mode')
758 763 if sharepool and islocal(dest):
759 764 sharepath = None
760 765 if sharenamemode == b'identity':
761 766 # Resolve the name from the initial changeset in the remote
762 767 # repository. This returns nullid when the remote is empty. It
763 768 # raises RepoLookupError if revision 0 is filtered or otherwise
764 769 # not available. If we fail to resolve, sharing is not enabled.
765 770 try:
766 771 with srcpeer.commandexecutor() as e:
767 772 rootnode = e.callcommand(
768 773 b'lookup',
769 774 {
770 775 b'key': b'0',
771 776 },
772 777 ).result()
773 778
774 779 if rootnode != sha1nodeconstants.nullid:
775 780 sharepath = os.path.join(sharepool, hex(rootnode))
776 781 else:
777 782 ui.status(
778 783 _(
779 784 b'(not using pooled storage: '
780 785 b'remote appears to be empty)\n'
781 786 )
782 787 )
783 788 except error.RepoLookupError:
784 789 ui.status(
785 790 _(
786 791 b'(not using pooled storage: '
787 792 b'unable to resolve identity of remote)\n'
788 793 )
789 794 )
790 795 elif sharenamemode == b'remote':
791 796 sharepath = os.path.join(
792 797 sharepool, hex(hashutil.sha1(source).digest())
793 798 )
794 799 else:
795 800 raise error.Abort(
796 801 _(b'unknown share naming mode: %s') % sharenamemode
797 802 )
798 803
799 804 # TODO this is a somewhat arbitrary restriction.
800 805 if narrow:
801 806 ui.status(
802 807 _(b'(pooled storage not supported for narrow clones)\n')
803 808 )
804 809 sharepath = None
805 810
806 811 if sharepath:
807 812 return clonewithshare(
808 813 ui,
809 814 peeropts,
810 815 sharepath,
811 816 source,
812 817 srcpeer,
813 818 dest,
814 819 pull=pull,
815 820 rev=revs,
816 821 update=update,
817 822 stream=stream,
818 823 )
819 824
820 825 srcrepo = srcpeer.local()
821 826
822 827 abspath = origsource
823 828 if islocal(origsource):
824 829 abspath = util.abspath(urlutil.urllocalpath(origsource))
825 830
826 831 if islocal(dest):
827 832 if os.path.exists(dest):
828 833 # only clean up directories we create ourselves
829 834 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
830 835 cleandir = hgdir
831 836 else:
832 837 cleandir = dest
833 838
834 839 copy = False
835 840 if (
836 841 srcrepo
837 842 and srcrepo.cancopy()
838 843 and islocal(dest)
839 844 and not phases.hassecret(srcrepo)
840 845 ):
841 846 copy = not pull and not revs
842 847
843 848 # TODO this is a somewhat arbitrary restriction.
844 849 if narrow:
845 850 copy = False
846 851
847 852 if copy:
848 853 try:
849 854 # we use a lock here because if we race with commit, we
850 855 # can end up with extra data in the cloned revlogs that's
851 856 # not pointed to by changesets, thus causing verify to
852 857 # fail
853 858 srclock = srcrepo.lock(wait=False)
854 859 except error.LockError:
855 860 copy = False
856 861
857 862 if copy:
858 863 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
859 864
860 865 destrootpath = urlutil.urllocalpath(dest)
861 866 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
862 867 localrepo.createrepository(
863 868 ui,
864 869 destrootpath,
865 870 requirements=dest_reqs,
866 871 )
867 872 destrepo = localrepo.makelocalrepository(ui, destrootpath)
868 873
869 874 destwlock = destrepo.wlock()
870 875 destlock = destrepo.lock()
871 876 from . import streamclone # avoid cycle
872 877
873 878 streamclone.local_copy(srcrepo, destrepo)
874 879
875 880 # we need to re-init the repo after manually copying the data
876 881 # into it
877 882 destpeer = peer(srcrepo, peeropts, dest)
878 883
879 884 # make the peer aware that is it already locked
880 885 #
881 886 # important:
882 887 #
883 888 # We still need to release that lock at the end of the function
884 889 destpeer.local()._lockref = weakref.ref(destlock)
885 890 destpeer.local()._wlockref = weakref.ref(destwlock)
886 891 # dirstate also needs to be copied because `_wlockref` has a reference
887 892 # to it: this dirstate is saved to disk when the wlock is released
888 893 destpeer.local().dirstate = destrepo.dirstate
889 894
890 895 srcrepo.hook(
891 896 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
892 897 )
893 898 else:
894 899 try:
895 900 # only pass ui when no srcrepo
896 901 destpeer = peer(
897 902 srcrepo or ui,
898 903 peeropts,
899 904 dest,
900 905 create=True,
901 906 createopts=createopts,
902 907 )
903 908 except FileExistsError:
904 909 cleandir = None
905 910 raise error.Abort(_(b"destination '%s' already exists") % dest)
906 911
907 912 if revs:
908 913 if not srcpeer.capable(b'lookup'):
909 914 raise error.Abort(
910 915 _(
911 916 b"src repository does not support "
912 917 b"revision lookup and so doesn't "
913 918 b"support clone by revision"
914 919 )
915 920 )
916 921
917 922 # TODO this is batchable.
918 923 remoterevs = []
919 924 for rev in revs:
920 925 with srcpeer.commandexecutor() as e:
921 926 remoterevs.append(
922 927 e.callcommand(
923 928 b'lookup',
924 929 {
925 930 b'key': rev,
926 931 },
927 932 ).result()
928 933 )
929 934 revs = remoterevs
930 935
931 936 checkout = revs[0]
932 937 else:
933 938 revs = None
934 939 local = destpeer.local()
935 940 if local:
936 941 if narrow:
937 942 with local.wlock(), local.lock():
938 943 local.setnarrowpats(storeincludepats, storeexcludepats)
939 944 narrowspec.copytoworkingcopy(local)
940 945
941 946 u = urlutil.url(abspath)
942 947 defaulturl = bytes(u)
943 948 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
944 949 if not stream:
945 950 if pull:
946 951 stream = False
947 952 else:
948 953 stream = None
949 954 # internal config: ui.quietbookmarkmove
950 955 overrides = {(b'ui', b'quietbookmarkmove'): True}
951 956 with local.ui.configoverride(overrides, b'clone'):
952 957 exchange.pull(
953 958 local,
954 959 srcpeer,
955 960 heads=revs,
956 961 streamclonerequested=stream,
957 962 includepats=storeincludepats,
958 963 excludepats=storeexcludepats,
959 964 depth=depth,
960 965 )
961 966 elif srcrepo:
962 967 # TODO lift restriction once exchange.push() accepts narrow
963 968 # push.
964 969 if narrow:
965 970 raise error.Abort(
966 971 _(
967 972 b'narrow clone not available for '
968 973 b'remote destinations'
969 974 )
970 975 )
971 976
972 977 exchange.push(
973 978 srcrepo,
974 979 destpeer,
975 980 revs=revs,
976 981 bookmarks=srcrepo._bookmarks.keys(),
977 982 )
978 983 else:
979 984 raise error.Abort(
980 985 _(b"clone from remote to remote not supported")
981 986 )
982 987
983 988 cleandir = None
984 989
985 990 destrepo = destpeer.local()
986 991 if destrepo:
987 992 template = uimod.samplehgrcs[b'cloned']
988 993 u = urlutil.url(abspath)
989 994 u.passwd = None
990 995 defaulturl = bytes(u)
991 996 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
992 997 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
993 998
994 999 if ui.configbool(b'experimental', b'remotenames'):
995 1000 logexchange.pullremotenames(destrepo, srcpeer)
996 1001
997 1002 if update:
998 1003 if update is not True:
999 1004 with srcpeer.commandexecutor() as e:
1000 1005 checkout = e.callcommand(
1001 1006 b'lookup',
1002 1007 {
1003 1008 b'key': update,
1004 1009 },
1005 1010 ).result()
1006 1011
1007 1012 uprev = None
1008 1013 status = None
1009 1014 if checkout is not None:
1010 1015 # Some extensions (at least hg-git and hg-subversion) have
1011 1016 # a peer.lookup() implementation that returns a name instead
1012 1017 # of a nodeid. We work around it here until we've figured
1013 1018 # out a better solution.
1014 1019 if len(checkout) == 20 and checkout in destrepo:
1015 1020 uprev = checkout
1016 1021 elif scmutil.isrevsymbol(destrepo, checkout):
1017 1022 uprev = scmutil.revsymbol(destrepo, checkout).node()
1018 1023 else:
1019 1024 if update is not True:
1020 1025 try:
1021 1026 uprev = destrepo.lookup(update)
1022 1027 except error.RepoLookupError:
1023 1028 pass
1024 1029 if uprev is None:
1025 1030 try:
1026 1031 if destrepo._activebookmark:
1027 1032 uprev = destrepo.lookup(destrepo._activebookmark)
1028 1033 update = destrepo._activebookmark
1029 1034 else:
1030 1035 uprev = destrepo._bookmarks[b'@']
1031 1036 update = b'@'
1032 1037 bn = destrepo[uprev].branch()
1033 1038 if bn == b'default':
1034 1039 status = _(b"updating to bookmark %s\n" % update)
1035 1040 else:
1036 1041 status = (
1037 1042 _(b"updating to bookmark %s on branch %s\n")
1038 1043 ) % (update, bn)
1039 1044 except KeyError:
1040 1045 try:
1041 1046 uprev = destrepo.branchtip(b'default')
1042 1047 except error.RepoLookupError:
1043 1048 uprev = destrepo.lookup(b'tip')
1044 1049 if not status:
1045 1050 bn = destrepo[uprev].branch()
1046 1051 status = _(b"updating to branch %s\n") % bn
1047 1052 destrepo.ui.status(status)
1048 1053 _update(destrepo, uprev)
1049 1054 if update in destrepo._bookmarks:
1050 1055 bookmarks.activate(destrepo, update)
1051 1056 if destlock is not None:
1052 1057 release(destlock)
1053 1058 if destwlock is not None:
1054 1059 release(destlock)
1055 1060 # here is a tiny windows were someone could end up writing the
1056 1061 # repository before the cache are sure to be warm. This is "fine"
1057 1062 # as the only "bad" outcome would be some slowness. That potential
1058 1063 # slowness already affect reader.
1059 1064 with destrepo.lock():
1060 1065 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1061 1066 finally:
1062 1067 release(srclock, destlock, destwlock)
1063 1068 if cleandir is not None:
1064 1069 shutil.rmtree(cleandir, True)
1065 1070 if srcpeer is not None:
1066 1071 srcpeer.close()
1067 1072 if destpeer and destpeer.local() is None:
1068 1073 destpeer.close()
1069 1074 return srcpeer, destpeer
1070 1075
1071 1076
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge ``stats`` to the repo ui.

    When ``quietempty`` is true and the stats report no changes at all,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1087 1092
1088 1093
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to ``node``.

    When ``overwrite`` is set, local changes are clobbered; otherwise they
    are merged into the destination.

    Deprecated: emits a deprecation warning and delegates to
    ``merge._update()``.

    Returns stats (see pydoc mercurial.merge.applyupdates).
    """
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    stats = mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=[b'working copy', b'destination'],
        updatecheck=updatecheck,
    )
    return stats
1107 1112
1108 1113
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node

    Shows the resulting stats and returns True when unresolved file
    merges remain after the update.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved
1116 1121
1117 1122
# 'update' is shadowed by a local name inside clone(); code there reaches
# the function above through this '_update' alias instead
_update = update
1120 1125
1121 1126
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes

    Always returns False: clobbering local changes can never leave
    unresolved file merges behind.
    """
    stats = mergemod.clean_update(repo[node])
    # a clean (overwriting) update cannot produce conflicts
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False
1129 1134
1130 1135
# updatetotally() takes a 'clean' argument that shadows this function; it
# uses the '_clean' alias instead
_clean = clean
1133 1138
# The set of updatecheck strategies accepted by updatetotally(); a
# configured value outside this set falls back to UPDATECHECK_LINEAR there.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1140 1145
1141 1146
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # an explicit (caller-supplied) updatecheck, however, must be valid
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one, along with a
            # possible bookmark to move and a name to (re)activate
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # dirtiness is enforced up front; the update itself then
                # runs without further checking
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            # clean update succeeded and a bookmark move was requested
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination name is an existing bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is a non-bookmark name: drop any active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            # destination was chosen automatically; hint at alternatives
            destutil.statusotherdests(ui, repo)

    return ret
1220 1225
1221 1226
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    hasconflicts = stats.unresolvedcount > 0
    if hasconflicts:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return hasconflicts
1243 1248
1244 1249
def abortmerge(ui, repo):
    """Abort an in-progress merge by cleanly updating back to its first
    parent."""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        node = ms.localctx.hex()  # there were conflicts
    else:
        node = repo[b'.'].hex()  # no conflicts, mergestate was not stored

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean (overwriting) update cannot produce conflicts
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1258 1263
1259 1264
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found; otherwise returns the
    result of subreporecurse().
    """
    # only a single pull source is supported for now
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        # graft the subrepo path onto the source URL; OS path rules apply
        # only when the source is local
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    # getremotechanges() below may replace 'other' (e.g. with a bundle repo)
    # and hand back its own cleanup callable, so track cleanup separately
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        # always invoke the most recent cleanup callable
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1321 1326
1322 1327
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``.

    Returns 0 when incoming changes were found, 1 otherwise (recursing
    into subrepositories when the 'subrepos' option is set).
    """

    def subreporecurse():
        # overall result becomes 0 as soon as any subrepo reports
        # incoming changes
        ret = 1
        if not opts.get(b'subrepos'):
            return ret
        wctx = repo[None]
        for spath in sorted(wctx.substate):
            sub = wctx.sub(spath)
            ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skipmerges = bool(opts.get(b'no_merges'))
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1352 1357
1353 1358
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute nodes missing from the given push destinations.

    Returns a pair ``(outgoing_revs, others)``: the union of missing
    nodes across all destinations, sorted by local revision number, and
    the list of still-open peers (the caller must close them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # graft the subrepo path onto the destination URL; OS path
            # rules apply only when the destination is local.
            # NOTE(review): subpath is rebound to a url object here; with
            # multiple dests the second iteration re-wraps that object —
            # confirm urlutil.url accepts a url instance.
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # peer stays open for the caller; only closed on failure below
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1396 1401
1397 1402
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on subrepositories (when the 'subrepos' option is
    set) and return the minimum exit code seen."""
    ret = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for spath in sorted(wctx.substate):
            sub = wctx.sub(spath)
            ret = min(ret, sub.outgoing(ui, dests, opts))
    return ret
1406 1411
1407 1412
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing

    Generator over the nodes in ``revs``, honoring the 'newest_first'
    ordering (reverses ``revs`` in place), the display limit, and the
    'no_merges' filter.
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: pass everything through
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1429 1434
1430 1435
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the specified destination(s).

    Returns 0 when outgoing changes were found (here or in a subrepo when
    the 'subrepos' option is set), 1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                # render the outgoing set as an ASCII DAG
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # hooks and subrepo recursion run even when nothing is outgoing here
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # zero when outgoing changes were found here or in a subrepo
    finally:
        # peers returned by _outgoing() are still open; close them all
        for oth in others:
            oth.close()
1462 1467
1463 1468
def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    substatefile = util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    subrevs = repo.revs(b"filelog(%s)", substatefile)
    if not subrevs:
        return ret

    repo.ui.status(_(b'checking subrepo links\n'))
    for rev in subrevs:
        ctx = repo[rev]
        try:
            for subpath in ctx.substate:
                try:
                    sub = ctx.sub(subpath, allowcreate=False)
                    ret = sub.verify() or ret
                except error.RepoError as e:
                    repo.ui.warn(b'%d: %s\n' % (rev, e))
        except Exception:
            # .hgsubstate itself could not be read for this revision
            repo.ui.warn(
                _(b'.hgsubstate is corrupt in revision %s\n')
                % short(ctx.node())
            )

    return ret
1496 1501
1497 1502
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # looks like a repository: drop repo-specific config, then read
        # the target options from the repo's ui
        dst = src.baseui.copy()
        src = src.ui
    else:
        # assume a global ui object: keep all global options
        dst = src.copy()

    # copy ssh-specific options
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # copy bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1526 1531
1527 1532
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (name of a repo attribute holding a directory path, file
# name inside that directory); consumed by cachedlocalrepo._repostate().
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1537 1542
1538 1543
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.
        state, mtime = self._repostate()
        if state == self._state:
            # nothing of interest changed: reuse the cached instance
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ((mtime, size) per file of interest, newest mtime seen)."""
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            path = os.path.join(prefix, fname)
            try:
                st = os.stat(path)
            except OSError:
                # file is missing: fall back to its containing directory
                st = os.stat(prefix)
            mtime = st[stat.ST_MTIME]
            state.append((mtime, st.st_size))
            maxmtime = max(maxmtime, mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        dup = cachedlocalrepo(repo)
        dup._state = self._state
        dup.mtime = self.mtime
        return dup
General Comments 0
You need to be logged in to leave comments. Login now