##// END OF EJS Templates
typing: add a type hint to `mercurial/hg.py`...
Matt Harbison -
r52567:e8f58714 default
parent child Browse files
Show More
@@ -1,1680 +1,1686
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import os
11 11 import posixpath
12 12 import shutil
13 13 import stat
14 import typing
14 15 import weakref
15 16
16 17 from .i18n import _
17 18 from .node import (
18 19 hex,
19 20 sha1nodeconstants,
20 21 short,
21 22 )
22 23
23 24 from . import (
24 25 bookmarks,
25 26 bundlerepo,
26 27 cmdutil,
27 28 destutil,
28 29 discovery,
29 30 error,
30 31 exchange,
31 32 extensions,
32 33 graphmod,
33 34 httppeer,
34 35 localrepo,
35 36 lock,
36 37 logcmdutil,
37 38 logexchange,
38 39 merge as mergemod,
39 40 mergestate as mergestatemod,
40 41 narrowspec,
41 42 phases,
42 43 requirements,
43 44 scmutil,
44 45 sshpeer,
45 46 statichttprepo,
46 47 ui as uimod,
47 48 unionrepo,
48 49 url,
49 50 util,
50 51 verify as verifymod,
51 52 vfs as vfsmod,
52 53 )
53 54 from .interfaces import repository as repositorymod
54 55 from .utils import (
55 56 hashutil,
56 57 stringutil,
57 58 urlutil,
58 59 )
59 60
61 if typing.TYPE_CHECKING:
62 from typing import (
63 List,
64 Tuple,
65 )
60 66
# Convenience alias: callers can release locks via hg.release() without
# importing the ``lock`` module themselves.
release = lock.release

# shared features
# name of the bookmarks "shared item" recorded for share-mode repositories
sharedbookmarks = b'bookmarks'
65 71
66 72
def addbranchrevs(lrepo, other, branches, revs, remotehidden=False):
    """Resolve branch names in ``branches`` against ``other``.

    ``branches`` is a ``(hashbranch, branchnames)`` pair.  Returns a
    ``(revs, checkout)`` pair where ``revs`` is the augmented revision
    list (or ``None``) and ``checkout`` the first entry (or ``None``).
    Branch names are resolved through the peer's ``branchmap`` command;
    ``b'.'`` means the dirstate branch of ``lrepo``.
    """
    # a courtesy to callers using a localrepo for other
    if hasattr(other, 'peer'):
        remote = other.peer(remotehidden=remotehidden)
    else:
        remote = other

    hashbranch, branchnames = branches
    if not hashbranch and not branchnames:
        # nothing branch-related requested: pass revs straight through
        checkout = revs[0] if revs else None
        return (revs or None), checkout

    revlist = list(revs) if revs else []

    if not remote.capable(b'branchmap'):
        if branchnames:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revlist.append(hashbranch)
        return revlist, revlist[0]

    with remote.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def resolve(name):
        # expand branch name into its heads; True when the branch exists
        if name == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            name = lrepo.dirstate.branch()
        if name not in branchmap:
            return False
        revlist.extend(hex(node) for node in reversed(branchmap[name]))
        return True

    for name in branchnames:
        if not resolve(name):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % name)
    if hashbranch and not resolve(hashbranch):
        # not a known branch name: treat it as a raw revision
        revlist.append(hashbranch)
    return revlist, revlist[0]
113 119
114 120
115 121 def _isfile(path):
116 122 try:
117 123 # we use os.stat() directly here instead of os.path.isfile()
118 124 # because the latter started returning `False` on invalid path
119 125 # exceptions starting in 3.8 and we care about handling
120 126 # invalid paths specially here.
121 127 st = os.stat(path)
122 128 except ValueError as e:
123 129 msg = stringutil.forcebytestr(e)
124 130 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
125 131 except OSError:
126 132 return False
127 133 else:
128 134 return stat.S_ISREG(st.st_mode)
129 135
130 136
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a plain file at the path is a bundle, which is not "local"
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        # bundles are plain files; everything else goes to localrepo
        expanded = util.expandpath(urlutil.urllocalpath(path))
        module = bundlerepo if _isfile(expanded) else localrepo
        return module.instance(ui, expanded, *args, **kwargs)
147 153
148 154
# URL scheme -> module/factory exposing an ``instance()`` constructor for
# (local) repository objects
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL scheme -> module exposing a ``make_peer()`` constructor for remote
# peer objects
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
161 167
162 168
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # legacy calling convention with a repo object: deprecated
        repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
        return repo.local()

    scheme = urlutil.url(repo).scheme or b'file'
    if scheme in peer_schemes:
        factory = peer_schemes[scheme]
        factory.make_peer  # make sure we load the module
    elif scheme in repo_schemes:
        factory = repo_schemes[scheme]
        factory.instance  # make sure we load the module
    else:
        factory = LocalFactory
    if hasattr(factory, 'islocal'):
        return factory.islocal(repo)  # pytype: disable=module-attr
    return False
181 187
182 188
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    parsed = urlutil.url(path, parsequery=False, parsefragment=False)
    if not parsed.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(parsed.localpath(), b'rb')
190 196
191 197
# a list of (ui, repo) functions called for wire peer initialization
# (iterated by _setup_repo_or_peer() for non-local objects; nothing in this
# module adds entries -- presumably extensions append here)
wirepeersetupfuncs = []
194 200
195 201
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run setup hooks on a newly created repo or peer object ``obj``.

    ``presetupfuncs`` callables run first, then every loaded extension's
    ``reposetup()`` hook is invoked (with timing logged to the
    'extension' log channel); finally, for non-local objects, the
    registered ``wirepeersetupfuncs`` run.
    """
    # prefer the object's own ui when it has one (carries its config)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peer: run the registered wire-peer initializers
        for f in wirepeersetupfuncs:
            f(ui, obj)
214 220
215 221
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    factory = repo_schemes.get(scheme)
    if factory is None:
        # remote schemes cannot produce a local repository object
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        factory = LocalFactory
    repo = factory.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    # hand back the standard "visible" filtered view
    return repo.filtered(b'visible')
242 248
243 249
def peer(
    uiorrepo,
    opts,
    path,
    create=False,
    intents=None,
    createopts=None,
    remotehidden=False,
):
    '''return a repository peer for the specified path

    ``uiorrepo`` may be a ui or a repo object (its ``ui`` attribute is
    used when present).  ``path`` may be raw bytes or an existing
    ``urlutil.path`` object.  Remote schemes are dispatched through
    ``peer_schemes``; anything else is opened as a local repository and
    its ``peer()`` is returned.
    '''
    ui = getattr(uiorrepo, 'ui', uiorrepo)
    # ui configured with the remote-relevant options from ``opts``
    rui = remoteui(uiorrepo, opts)
    if hasattr(path, 'url'):
        # this is already a urlutil.path object
        peer_path = path
    else:
        peer_path = urlutil.path(ui, None, rawloc=path, validate_path=False)
    scheme = peer_path.url.scheme  # pytype: disable=attribute-error
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
        peer = cls.make_peer(
            rui,
            peer_path,
            create,
            intents=intents,
            createopts=createopts,
            remotehidden=remotehidden,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo_path = peer_path.loc  # pytype: disable=attribute-error
        if not repo_path:
            repo_path = peer_path.rawloc  # pytype: disable=attribute-error
        repo = repository(
            rui,
            repo_path,
            create,
            intents=intents,
            createopts=createopts,
        )
        peer = repo.peer(path=peer_path, remotehidden=remotehidden)
    return peer
287 293
288 294
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    # use the last path component of the source URL, if any
    path = urlutil.url(source).path
    return os.path.basename(os.path.normpath(path)) if path else b''
309 315
310 316
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share at all
        return None

    cached = getattr(repo, 'srcrepo', None)
    if cached:
        return cached

    # the sharedpath always ends in the .hg; we want the path to the repo
    sourcedir = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(sourcedir)
    src = repository(repo.ui, srcurl)
    # memoize on the repo object for subsequent lookups
    repo.srcrepo = src
    return src
328 334
329 335
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` is a local repository object or a path/URL (bytes).
    ``dest`` defaults to a name derived from the source.  ``update``
    may be True (update to the resolved checkout) or a revision.
    ``bookmarks`` controls whether bookmarks are added to the shared
    items; ``relative`` stores the share pointer as a relative path.
    Returns the newly created (re-opened) repository object.
    '''

    not_local_msg = _(b'can only share local repositories')
    if hasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        # resolve the source path and any branch/rev it carries
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the new repo reflects the post-share configuration
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
382 388
383 389
def _prependsourcehgrc(repo):
    """Copy the shared source's config in front of this repo's .hg/hgrc.

    Done on unshare, and only when the share was created with the
    share-safe method (where the source's config is shared).
    """
    sourcevfs = vfsmod.vfs(repo.sharedpath)
    localvfs = vfsmod.vfs(repo.path)

    if not sourcevfs.exists(b'hgrc'):
        # nothing to carry over
        return

    existing = localvfs.read(b'hgrc') if localvfs.exists(b'hgrc') else b''

    with localvfs(b'hgrc', b'wb') as fp:
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourcevfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(existing)
404 410
405 411
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old share pointer around as a backup rather than
            # deleting it outright
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old (stale) repo object raise
    localrepo.poisonrepository(repo)

    return newrepo
452 458
453 459
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    # record where pulls should come from by default
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        destrepo.vfs.write(
            b'hgrc', util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        )
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow repos also need their narrowspec mirrored into the share
        with destrepo.wlock(), destrepo.lock(), destrepo.transaction(
            b"narrow-share"
        ):
            narrowspec.copytoworkingcopy(destrepo)
472 478
473 479
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # an explicit revision was requested; it overrides ``checkout``
        checkout = update
    # first candidate that resolves wins; b'tip' resolves in any repo
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
494 500
495 501
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock

    The returned lock (possibly None) protects the partially written
    destination store and must be released by the caller.
    """
    destlock = None
    try:
        # hardlink starts as None, so the topic is b'copying' initially;
        # util.copyfiles() returns whether hardlinking was actually used
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # publishing repos have no secret phase data worth copying
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # release any destination lock we acquired before propagating
        release(destlock)
        raise
534 540
535 541
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns a ``(srcpeer, destpeer)`` pair.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # resolve each requested rev name to a node on the remote
        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
641 647
642 648
643 649 # Recomputing caches is often slow on big repos, so copy them.
644 650 def _copycache(srcrepo, dstcachedir, fname):
645 651 """copy a cache from srcrepo to destcachedir (if it exists)"""
646 652 srcfname = srcrepo.cachevfs.join(fname)
647 653 dstfname = os.path.join(dstcachedir, fname)
648 654 if os.path.exists(srcfname):
649 655 if not os.path.exists(dstcachedir):
650 656 os.mkdir(dstcachedir)
651 657 util.copyfile(srcfname, dstfname)
652 658
653 659
654 660 def clone(
655 661 ui,
656 662 peeropts,
657 663 source,
658 664 dest=None,
659 665 pull=False,
660 666 revs=None,
661 667 update=True,
662 668 stream=False,
663 669 branch=None,
664 670 shareopts=None,
665 671 storeincludepats=None,
666 672 storeexcludepats=None,
667 673 depth=None,
668 674 ):
669 675 """Make a copy of an existing repository.
670 676
671 677 Create a copy of an existing repository in a new directory. The
672 678 source and destination are URLs, as passed to the repository
673 679 function. Returns a pair of repository peers, the source and
674 680 newly created destination.
675 681
676 682 The location of the source is added to the new repository's
677 683 .hg/hgrc file, as the default to be used for future pulls and
678 684 pushes.
679 685
680 686 If an exception is raised, the partly cloned/updated destination
681 687 repository will be deleted.
682 688
683 689 Arguments:
684 690
685 691 source: repository object or URL
686 692
687 693 dest: URL of destination repository to create (defaults to base
688 694 name of source repository)
689 695
690 696 pull: always pull from source repository, even in local case or if the
691 697 server prefers streaming
692 698
693 699 stream: stream raw data uncompressed from repository (fast over
694 700 LAN, slow over WAN)
695 701
696 702 revs: revision to clone up to (implies pull=True)
697 703
698 704 update: update working directory after clone completes, if
699 705 destination is local repository (True means update to default rev,
700 706 anything else is treated as a revision)
701 707
702 708 branch: branches to clone
703 709
704 710 shareopts: dict of options to control auto sharing behavior. The "pool" key
705 711 activates auto sharing mode and defines the directory for stores. The
706 712 "mode" key determines how to construct the directory name of the shared
707 713 repository. "identity" means the name is derived from the node of the first
708 714 changeset in the repository. "remote" means the name is derived from the
709 715 remote's path/URL. Defaults to "identity."
710 716
711 717 storeincludepats and storeexcludepats: sets of file patterns to include and
712 718 exclude in the repository copy, respectively. If not defined, all files
713 719 will be included (a "full" clone). Otherwise a "narrow" clone containing
714 720 only the requested files will be performed. If ``storeincludepats`` is not
715 721 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
716 722 ``path:.``. If both are empty sets, no files will be cloned.
717 723 """
718 724
719 725 if isinstance(source, bytes):
720 726 src_path = urlutil.get_clone_path_obj(ui, source)
721 727 if src_path is None:
722 728 srcpeer = peer(ui, peeropts, b'')
723 729 origsource = source = b''
724 730 branches = (None, branch or [])
725 731 else:
726 732 srcpeer = peer(ui, peeropts, src_path)
727 733 origsource = src_path.rawloc
728 734 branches = (src_path.branch, branch or [])
729 735 source = src_path.loc
730 736 else:
731 737 if hasattr(source, 'peer'):
732 738 srcpeer = source.peer() # in case we were called with a localrepo
733 739 else:
734 740 srcpeer = source
735 741 branches = (None, branch or [])
736 742 # XXX path: simply use the peer `path` object when this become available
737 743 origsource = source = srcpeer.url()
738 744 srclock = destlock = destwlock = cleandir = None
739 745 destpeer = None
740 746 try:
741 747 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
742 748
743 749 if dest is None:
744 750 dest = defaultdest(source)
745 751 if dest:
746 752 ui.status(_(b"destination directory: %s\n") % dest)
747 753 else:
748 754 dest_path = urlutil.get_clone_path_obj(ui, dest)
749 755 if dest_path is not None:
750 756 dest = dest_path.rawloc
751 757 else:
752 758 dest = b''
753 759
754 760 dest = urlutil.urllocalpath(dest)
755 761 source = urlutil.urllocalpath(source)
756 762
757 763 if not dest:
758 764 raise error.InputError(_(b"empty destination path is not valid"))
759 765
760 766 destvfs = vfsmod.vfs(dest, expandpath=True)
761 767 if destvfs.lexists():
762 768 if not destvfs.isdir():
763 769 raise error.InputError(
764 770 _(b"destination '%s' already exists") % dest
765 771 )
766 772 elif destvfs.listdir():
767 773 raise error.InputError(
768 774 _(b"destination '%s' is not empty") % dest
769 775 )
770 776
771 777 createopts = {}
772 778 narrow = False
773 779
774 780 if storeincludepats is not None:
775 781 narrowspec.validatepatterns(storeincludepats)
776 782 narrow = True
777 783
778 784 if storeexcludepats is not None:
779 785 narrowspec.validatepatterns(storeexcludepats)
780 786 narrow = True
781 787
782 788 if narrow:
783 789 # Include everything by default if only exclusion patterns defined.
784 790 if storeexcludepats and not storeincludepats:
785 791 storeincludepats = {b'path:.'}
786 792
787 793 createopts[b'narrowfiles'] = True
788 794
789 795 if depth:
790 796 createopts[b'shallowfilestore'] = True
791 797
792 798 if srcpeer.capable(b'lfs-serve'):
793 799 # Repository creation honors the config if it disabled the extension, so
794 800 # we can't just announce that lfs will be enabled. This check avoids
795 801 # saying that lfs will be enabled, and then saying it's an unknown
796 802 # feature. The lfs creation option is set in either case so that a
797 803 # requirement is added. If the extension is explicitly disabled but the
798 804 # requirement is set, the clone aborts early, before transferring any
799 805 # data.
800 806 createopts[b'lfs'] = True
801 807
802 808 if b'lfs' in extensions.disabled():
803 809 ui.status(
804 810 _(
805 811 b'(remote is using large file support (lfs), but it is '
806 812 b'explicitly disabled in the local configuration)\n'
807 813 )
808 814 )
809 815 else:
810 816 ui.status(
811 817 _(
812 818 b'(remote is using large file support (lfs); lfs will '
813 819 b'be enabled for this repository)\n'
814 820 )
815 821 )
816 822
817 823 shareopts = shareopts or {}
818 824 sharepool = shareopts.get(b'pool')
819 825 sharenamemode = shareopts.get(b'mode')
820 826 if sharepool and islocal(dest):
821 827 sharepath = None
822 828 if sharenamemode == b'identity':
823 829 # Resolve the name from the initial changeset in the remote
824 830 # repository. This returns nullid when the remote is empty. It
825 831 # raises RepoLookupError if revision 0 is filtered or otherwise
826 832 # not available. If we fail to resolve, sharing is not enabled.
827 833 try:
828 834 with srcpeer.commandexecutor() as e:
829 835 rootnode = e.callcommand(
830 836 b'lookup',
831 837 {
832 838 b'key': b'0',
833 839 },
834 840 ).result()
835 841
836 842 if rootnode != sha1nodeconstants.nullid:
837 843 sharepath = os.path.join(sharepool, hex(rootnode))
838 844 else:
839 845 ui.status(
840 846 _(
841 847 b'(not using pooled storage: '
842 848 b'remote appears to be empty)\n'
843 849 )
844 850 )
845 851 except error.RepoLookupError:
846 852 ui.status(
847 853 _(
848 854 b'(not using pooled storage: '
849 855 b'unable to resolve identity of remote)\n'
850 856 )
851 857 )
852 858 elif sharenamemode == b'remote':
853 859 sharepath = os.path.join(
854 860 sharepool, hex(hashutil.sha1(source).digest())
855 861 )
856 862 else:
857 863 raise error.Abort(
858 864 _(b'unknown share naming mode: %s') % sharenamemode
859 865 )
860 866
861 867 # TODO this is a somewhat arbitrary restriction.
862 868 if narrow:
863 869 ui.status(
864 870 _(b'(pooled storage not supported for narrow clones)\n')
865 871 )
866 872 sharepath = None
867 873
868 874 if sharepath:
869 875 return clonewithshare(
870 876 ui,
871 877 peeropts,
872 878 sharepath,
873 879 source,
874 880 srcpeer,
875 881 dest,
876 882 pull=pull,
877 883 rev=revs,
878 884 update=update,
879 885 stream=stream,
880 886 )
881 887
882 888 srcrepo = srcpeer.local()
883 889
884 890 abspath = origsource
885 891 if islocal(origsource):
886 892 abspath = util.abspath(urlutil.urllocalpath(origsource))
887 893
888 894 if islocal(dest):
889 895 if os.path.exists(dest):
890 896 # only clean up directories we create ourselves
891 897 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
892 898 cleandir = hgdir
893 899 else:
894 900 cleandir = dest
895 901
896 902 copy = False
897 903 if (
898 904 srcrepo
899 905 and srcrepo.cancopy()
900 906 and islocal(dest)
901 907 and not phases.hassecret(srcrepo)
902 908 ):
903 909 copy = not pull and not revs
904 910
905 911 # TODO this is a somewhat arbitrary restriction.
906 912 if narrow:
907 913 copy = False
908 914
909 915 if copy:
910 916 try:
911 917 # we use a lock here because if we race with commit, we
912 918 # can end up with extra data in the cloned revlogs that's
913 919 # not pointed to by changesets, thus causing verify to
914 920 # fail
915 921 srclock = srcrepo.lock(wait=False)
916 922 except error.LockError:
917 923 copy = False
918 924
919 925 if copy:
920 926 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
921 927
922 928 destrootpath = urlutil.urllocalpath(dest)
923 929 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
924 930 localrepo.createrepository(
925 931 ui,
926 932 destrootpath,
927 933 requirements=dest_reqs,
928 934 )
929 935 destrepo = localrepo.makelocalrepository(ui, destrootpath)
930 936
931 937 destwlock = destrepo.wlock()
932 938 destlock = destrepo.lock()
933 939 from . import streamclone # avoid cycle
934 940
935 941 streamclone.local_copy(srcrepo, destrepo)
936 942
937 943 # we need to re-init the repo after manually copying the data
938 944 # into it
939 945 destpeer = peer(srcrepo, peeropts, dest)
940 946
941 947 # make the peer aware that is it already locked
942 948 #
943 949 # important:
944 950 #
945 951 # We still need to release that lock at the end of the function
946 952 destpeer.local()._lockref = weakref.ref(destlock)
947 953 destpeer.local()._wlockref = weakref.ref(destwlock)
948 954 # dirstate also needs to be copied because `_wlockref` has a reference
949 955 # to it: this dirstate is saved to disk when the wlock is released
950 956 destpeer.local().dirstate = destrepo.dirstate
951 957
952 958 srcrepo.hook(
953 959 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
954 960 )
955 961 else:
956 962 try:
957 963 # only pass ui when no srcrepo
958 964 destpeer = peer(
959 965 srcrepo or ui,
960 966 peeropts,
961 967 dest,
962 968 create=True,
963 969 createopts=createopts,
964 970 )
965 971 except FileExistsError:
966 972 cleandir = None
967 973 raise error.Abort(_(b"destination '%s' already exists") % dest)
968 974
969 975 if revs:
970 976 if not srcpeer.capable(b'lookup'):
971 977 raise error.Abort(
972 978 _(
973 979 b"src repository does not support "
974 980 b"revision lookup and so doesn't "
975 981 b"support clone by revision"
976 982 )
977 983 )
978 984
979 985 # TODO this is batchable.
980 986 remoterevs = []
981 987 for rev in revs:
982 988 with srcpeer.commandexecutor() as e:
983 989 remoterevs.append(
984 990 e.callcommand(
985 991 b'lookup',
986 992 {
987 993 b'key': rev,
988 994 },
989 995 ).result()
990 996 )
991 997 revs = remoterevs
992 998
993 999 checkout = revs[0]
994 1000 else:
995 1001 revs = None
996 1002 local = destpeer.local()
997 1003 if local:
998 1004 if narrow:
999 1005 with local.wlock(), local.lock(), local.transaction(
1000 1006 b'narrow-clone'
1001 1007 ):
1002 1008 local.setnarrowpats(storeincludepats, storeexcludepats)
1003 1009 narrowspec.copytoworkingcopy(local)
1004 1010
1005 1011 u = urlutil.url(abspath)
1006 1012 defaulturl = bytes(u)
1007 1013 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1008 1014 if not stream:
1009 1015 if pull:
1010 1016 stream = False
1011 1017 else:
1012 1018 stream = None
1013 1019 # internal config: ui.quietbookmarkmove
1014 1020 overrides = {(b'ui', b'quietbookmarkmove'): True}
1015 1021 with local.ui.configoverride(overrides, b'clone'):
1016 1022 exchange.pull(
1017 1023 local,
1018 1024 srcpeer,
1019 1025 heads=revs,
1020 1026 streamclonerequested=stream,
1021 1027 includepats=storeincludepats,
1022 1028 excludepats=storeexcludepats,
1023 1029 depth=depth,
1024 1030 )
1025 1031 elif srcrepo:
1026 1032 # TODO lift restriction once exchange.push() accepts narrow
1027 1033 # push.
1028 1034 if narrow:
1029 1035 raise error.Abort(
1030 1036 _(
1031 1037 b'narrow clone not available for '
1032 1038 b'remote destinations'
1033 1039 )
1034 1040 )
1035 1041
1036 1042 exchange.push(
1037 1043 srcrepo,
1038 1044 destpeer,
1039 1045 revs=revs,
1040 1046 bookmarks=srcrepo._bookmarks.keys(),
1041 1047 )
1042 1048 else:
1043 1049 raise error.Abort(
1044 1050 _(b"clone from remote to remote not supported")
1045 1051 )
1046 1052
1047 1053 cleandir = None
1048 1054
1049 1055 destrepo = destpeer.local()
1050 1056 if destrepo:
1051 1057 template = uimod.samplehgrcs[b'cloned']
1052 1058 u = urlutil.url(abspath)
1053 1059 u.passwd = None
1054 1060 defaulturl = bytes(u)
1055 1061 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1056 1062 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1057 1063
1058 1064 if ui.configbool(b'experimental', b'remotenames'):
1059 1065 logexchange.pullremotenames(destrepo, srcpeer)
1060 1066
1061 1067 if update:
1062 1068 if update is not True:
1063 1069 with srcpeer.commandexecutor() as e:
1064 1070 checkout = e.callcommand(
1065 1071 b'lookup',
1066 1072 {
1067 1073 b'key': update,
1068 1074 },
1069 1075 ).result()
1070 1076
1071 1077 uprev = None
1072 1078 status = None
1073 1079 if checkout is not None:
1074 1080 # Some extensions (at least hg-git and hg-subversion) have
1075 1081 # a peer.lookup() implementation that returns a name instead
1076 1082 # of a nodeid. We work around it here until we've figured
1077 1083 # out a better solution.
1078 1084 if len(checkout) == 20 and checkout in destrepo:
1079 1085 uprev = checkout
1080 1086 elif scmutil.isrevsymbol(destrepo, checkout):
1081 1087 uprev = scmutil.revsymbol(destrepo, checkout).node()
1082 1088 else:
1083 1089 if update is not True:
1084 1090 try:
1085 1091 uprev = destrepo.lookup(update)
1086 1092 except error.RepoLookupError:
1087 1093 pass
1088 1094 if uprev is None:
1089 1095 try:
1090 1096 if destrepo._activebookmark:
1091 1097 uprev = destrepo.lookup(destrepo._activebookmark)
1092 1098 update = destrepo._activebookmark
1093 1099 else:
1094 1100 uprev = destrepo._bookmarks[b'@']
1095 1101 update = b'@'
1096 1102 bn = destrepo[uprev].branch()
1097 1103 if bn == b'default':
1098 1104 status = _(b"updating to bookmark %s\n" % update)
1099 1105 else:
1100 1106 status = (
1101 1107 _(b"updating to bookmark %s on branch %s\n")
1102 1108 ) % (update, bn)
1103 1109 except KeyError:
1104 1110 try:
1105 1111 uprev = destrepo.branchtip(b'default')
1106 1112 except error.RepoLookupError:
1107 1113 uprev = destrepo.lookup(b'tip')
1108 1114 if not status:
1109 1115 bn = destrepo[uprev].branch()
1110 1116 status = _(b"updating to branch %s\n") % bn
1111 1117 destrepo.ui.status(status)
1112 1118 _update(destrepo, uprev)
1113 1119 if update in destrepo._bookmarks:
1114 1120 bookmarks.activate(destrepo, update)
1115 1121 if destlock is not None:
1116 1122 release(destlock)
1117 1123 if destwlock is not None:
1118 1124 release(destlock)
1119 1125 # here is a tiny windows were someone could end up writing the
1120 1126 # repository before the cache are sure to be warm. This is "fine"
1121 1127 # as the only "bad" outcome would be some slowness. That potential
1122 1128 # slowness already affect reader.
1123 1129 with destrepo.lock():
1124 1130 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1125 1131 finally:
1126 1132 release(srclock, destlock, destwlock)
1127 1133 if cleandir is not None:
1128 1134 shutil.rmtree(cleandir, True)
1129 1135 if srcpeer is not None:
1130 1136 srcpeer.close()
1131 1137 if destpeer and destpeer.local() is None:
1132 1138 destpeer.close()
1133 1139 return srcpeer, destpeer
1134 1140
1135 1141
def _showstats(repo, stats, quietempty=False):
    """Print the update/merge result counts in ``stats`` to the repo's ui.

    When ``quietempty`` is true and nothing happened, print nothing.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1151 1157
1152 1158
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated since 5.7 in favor of merge.update()/merge.clean_update();
    # kept only as a thin shim that forwards to merge._update().
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=[b'working copy', b'destination'],
        updatecheck=updatecheck,
    )
1171 1177
1172 1178
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    has_unresolved = stats.unresolvedcount > 0
    if has_unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return has_unresolved


# naming conflict in clone()
_update = update
1184 1190
1185 1191
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    applied = mergemod.clean_update(repo[node])
    # an overwriting update can never leave unresolved file merges behind
    assert applied.unresolvedcount == 0
    if show_stats:
        _showstats(repo, applied, quietempty)
    return False


# naming conflict in updatetotally()
_clean = clean
1197 1203
# The set of `commands.update.check` values accepted by updatetotally();
# an unconfigured or invalid value falls back to UPDATECHECK_LINEAR there.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1204 1210
1205 1211
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit argument must be one of the known constants
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # a bookmark to move from / a name to activate afterwards)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out eagerly, then let the plain update proceed
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            # conflict-free update: try to advance the active bookmark
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination name is a bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination name is not a bookmark: drop any active one
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1284 1290
1285 1291
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    conflicts = stats.unresolvedcount > 0
    if conflicts:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return conflicts
1307 1313
1308 1314
def abortmerge(ui, repo):
    """Abort an in-progress merge by force-updating back to its local side."""
    ms = mergestatemod.mergestate.read(repo)
    # with conflicts a mergestate exists and records the local side; without
    # conflicts no mergestate was stored, so '.' is already the right target
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean (overwriting) update can never produce conflicts
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1322 1328
1323 1329
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed, otherwise
    the return value of subreporecurse().
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            peer_path = url = bytes(subpath)
        else:
            # relative subrepo path: join it onto the parent's source URL,
            # normalizing with the path flavor matching the URL's locality
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges() may replace `other` (e.g. with a bundle-backed
        # repo) and hands back the cleanup callable to use instead
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1390 1396
1391 1397
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``.

    Returns 0 when incoming changes exist, 1 otherwise (used as the
    command's exit code).
    """

    def subreporecurse():
        # recurse into subrepositories when --subrepos was given; result
        # becomes 0 as soon as any subrepo reports incoming changes
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # honor --limit, --newest-first and --no-merges while displaying
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1421 1427
1422 1428
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the changesets missing from every destination in ``dests``.

    Returns a tuple ``(outgoing_revs, others)`` where ``outgoing_revs`` is
    the union of missing nodes sorted by local revision number, and
    ``others`` is the list of still-open peers — the caller is responsible
    for closing them.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            # rebase the destination onto the subrepo path (absolute
            # subpaths replace it outright)
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # peer stays open; ownership passes to the caller via `others`
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1465 1471
1466 1472
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing() on each subrepo when --subrepos was requested.

    Returns the minimum of the per-subrepo exit codes, or 1 when there
    was nothing to recurse into.
    """
    ret = 1
    if not opts.get(b'subrepos'):
        return ret
    wctx = repo[None]
    for spath in sorted(wctx.substate):
        ret = min(ret, wctx.sub(spath).outgoing(ui, dests, opts))
    return ret
1475 1481
1476 1482
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        # NOTE: reverses the caller's list in place (matches historic behavior)
        revs.reverse()
    if limit is None and not no_merges:
        # no filtering requested: stream everything through unchanged
        yield from revs
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        if no_merges:
            parents = [p for p in cl.parents(node) if p != repo.nullid]
            if len(parents) == 2:
                continue
        emitted += 1
        yield node
1498 1504
1499 1505
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the destination(s).

    Returns 0 when outgoing changes exist, 1 otherwise (used as the
    command's exit code).
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # hooks run even when nothing is outgoing
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # _outgoing() left the peers open for the hooks above; close them now
        for oth in others:
            oth.close()
1531 1537
1532 1538
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies any subrepositories referenced from visible revisions'
    .hgsubstate files. Returns a nonzero status if any check failed.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # keep any nonzero (failure) status from either the
                        # subrepo verify or an earlier check
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading .hgsubstate itself blew up: report and continue
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1565 1571
1566 1572
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if hasattr(src, 'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # ssh-specific options: explicit command-line opts win over src config
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # copy selected local settings to the remote ui
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for name, value in src.configitems(section):
            dst.setconfig(section, name, value, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1595 1601
1596 1602
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (repo attribute holding a base directory, file name within
# it); consumed by cachedlocalrepo._repostate() via getattr + os.path.join.
foi: "List[Tuple[str, bytes]]" = [
    ('spath', b'00changelog.i'),
    ('spath', b'phaseroots'),  # ! phase can change content at the same size
    ('spath', b'obsstore'),
    ('path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1606 1612
1607 1613
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint of the files of interest plus the newest mtime seen;
        # compared by fetch() to detect on-disk changes
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # stale: open a fresh repo and re-apply the original filter level
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ``(per-file (mtime, size) tuple, max mtime seen)``.

        Falls back to stat()ing the containing directory when one of the
        files listed in ``foi`` does not exist.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the freshness fingerprint so the copy refreshes consistently
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now