##// END OF EJS Templates
peer-or-repo: make sure objects in "schemes" have an `instance` attribute...
marmoute -
r50580:d9791643 default
parent child Browse files
Show More
@@ -1,1604 +1,1611 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import os
11 11 import posixpath
12 12 import shutil
13 13 import stat
14 14 import weakref
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 sha1nodeconstants,
20 20 short,
21 21 )
22 22 from .pycompat import getattr
23 23
24 24 from . import (
25 25 bookmarks,
26 26 bundlerepo,
27 27 cmdutil,
28 28 destutil,
29 29 discovery,
30 30 error,
31 31 exchange,
32 32 extensions,
33 33 graphmod,
34 34 httppeer,
35 35 localrepo,
36 36 lock,
37 37 logcmdutil,
38 38 logexchange,
39 39 merge as mergemod,
40 40 mergestate as mergestatemod,
41 41 narrowspec,
42 42 phases,
43 43 requirements,
44 44 scmutil,
45 45 sshpeer,
46 46 statichttprepo,
47 47 ui as uimod,
48 48 unionrepo,
49 49 url,
50 50 util,
51 51 verify as verifymod,
52 52 vfs as vfsmod,
53 53 )
54 54 from .interfaces import repository as repositorymod
55 55 from .utils import (
56 56 hashutil,
57 57 stringutil,
58 58 urlutil,
59 59 )
60 60
61 61
62 62 release = lock.release
63 63
64 64 # shared features
65 65 sharedbookmarks = b'bookmarks'
66 66
67 67
def _local(path):
    """Return the repository module appropriate for a local *path*.

    ``bundlerepo`` when the path points at a regular file (a bundle),
    ``localrepo`` otherwise.  Aborts on syntactically invalid paths.
    """
    expanded = util.expandpath(urlutil.urllocalpath(path))

    regular_file = False
    try:
        # os.stat() is used directly instead of os.path.isfile() because
        # the latter started returning `False` on invalid-path exceptions
        # in Python 3.8 and we want to report invalid paths explicitly.
        mode = os.stat(expanded).st_mode
    except OSError:
        regular_file = False
    except ValueError as exc:
        msg = stringutil.forcebytestr(exc)
        raise error.Abort(_(b'invalid path %s: %s') % (expanded, msg))
    else:
        regular_file = stat.S_ISREG(mode)

    if regular_file:
        return bundlerepo
    return localrepo
87
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against a peer into concrete revisions.

    ``branches`` is a ``(hashbranch, branches)`` pair.  Returns a
    ``(revs, checkout)`` pair: the list of revisions to operate on and
    the revision to check out (``None`` when nothing was resolved).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch requested: pass the caller's revs straight through
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # peer cannot enumerate branches; hashbranch can only be treated
        # as a plain revision identifier
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # extend ``revs`` with the heads of ``branch``; return False when
        # the branch is not in the peer's branchmap
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # hashbranch may name a branch or be a raw revision; fall back to
        # treating it as a revision when it is not a known branch
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
131 111
112 def _isfile(path):
113 try:
114 # we use os.stat() directly here instead of os.path.isfile()
115 # because the latter started returning `False` on invalid path
116 # exceptions starting in 3.8 and we care about handling
117 # invalid paths specially here.
118 st = os.stat(path)
119 except ValueError as e:
120 msg = stringutil.forcebytestr(e)
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
122 except OSError:
123 return False
124 else:
125 return stat.S_ISREG(st.st_mode)
126
127
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a regular file is a bundle, which is not a local repository
        local_path = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(local_path)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        local_path = util.expandpath(urlutil.urllocalpath(path))
        module = bundlerepo if _isfile(local_path) else localrepo
        return module.instance(ui, local_path, *args, **kwargs)
145
# Map a URL scheme to the module/class whose `instance` callable builds a
# repository or peer for that scheme (see _peerlookup/_peerorrepo).
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
141 155
142 156
def _peerlookup(path):
    """Return the scheme handler (module or class) for *path*.

    Every value in ``schemes`` provides an ``instance`` callable, so the
    old fallback that tried calling the handler directly is gone; unknown
    schemes fall back to the ``file`` handler.
    """
    u = urlutil.url(path)
    scheme = u.scheme or b'file'
    thing = schemes.get(scheme) or schemes[b'file']
    return thing
155 162
156 163
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # already a repo/peer object: ask it directly
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # the scheme handler has no notion of locality
        return False
165 172
166 173
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
174 181
175 182
# a list of (ui, repo) functions called for wire peer initialization;
# applied by _setup_repo_or_peer() to objects whose .local() is falsy
wirepeersetupfuncs = []
179 186
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    handler = _peerlookup(path)
    obj = handler.instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    _setup_repo_or_peer(ui, obj, presetupfuncs)
    return obj
188 195
189 196
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run pre-setup callbacks and extension ``reposetup`` hooks on *obj*.

    *obj* may be a repository or a peer; for objects whose ``local()`` is
    falsy the registered ``wirepeersetupfuncs`` are applied as well.
    """
    # prefer the object's own ui when it has one
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                # time each extension's hook individually for diagnostics
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
208 215
209 216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = obj.local()
    if not repo:
        # a peer without a local repository cannot be returned here
        msg = _(b"repository '%s' is not local") % (path or obj.url())
        raise error.Abort(msg)
    return repo.filtered(b'visible')
233 240
234 241
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
241 248
242 249
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # normpath drops any trailing slash so basename yields the last segment
    return os.path.basename(os.path.normpath(path)) if path else b''
263 270
264 271
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share: the store lives in the repo itself
        return None

    if util.safehasattr(repo, b'srcrepo'):
        cached = repo.srcrepo
        if cached:
            return cached

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo  # memoize for subsequent calls
    return srcrepo
282 289
283 290
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        # source given as a path/URL: open it and resolve branch revisions
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # source is already a repo/peer object
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen so the repo object reflects the shared state written above
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
330 337
331 338
def _prependsourcehgrc(repo):
    """copy the source repo's config and prepend it to this repo's .hg/hgrc

    Used on unshare.  This is only done when the share was performed with
    the share-safe method, where the source's config is shared with the
    shares.
    """
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # nothing to prepend
        return

    currentconfig = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        fp.write(b"# Config copied from shared source\n")
        fp.write(srcvfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(currentconfig)
352 359
353 360
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around (renamed) rather than deleting it
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make stale references to the old repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
400 407
401 408
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # seed the new repo's default path with the source's default
        hgrc = b'[paths]\ndefault = %s\n' % default
        destrepo.vfs.write(b'hgrc', util.tonativeeol(hgrc))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
418 425
419 426
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the explicit checkout first, then sensible fallbacks
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
440 447
441 448
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        # hardlink starts as None, so the topic is always b'copying' here;
        # util.copyfiles() decides below whether hardlinking is possible
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # phase roots of a publishing repo are not copied
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # release the destination lock before propagating any error
        release(destlock)
        raise
480 487
481 488
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
587 594
588 595
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcfname = srcrepo.cachevfs.join(fname)
    if not os.path.exists(srcfname):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srcfname, os.path.join(dstcachedir, fname))
598 605
599 606
600 607 def clone(
601 608 ui,
602 609 peeropts,
603 610 source,
604 611 dest=None,
605 612 pull=False,
606 613 revs=None,
607 614 update=True,
608 615 stream=False,
609 616 branch=None,
610 617 shareopts=None,
611 618 storeincludepats=None,
612 619 storeexcludepats=None,
613 620 depth=None,
614 621 ):
615 622 """Make a copy of an existing repository.
616 623
617 624 Create a copy of an existing repository in a new directory. The
618 625 source and destination are URLs, as passed to the repository
619 626 function. Returns a pair of repository peers, the source and
620 627 newly created destination.
621 628
622 629 The location of the source is added to the new repository's
623 630 .hg/hgrc file, as the default to be used for future pulls and
624 631 pushes.
625 632
626 633 If an exception is raised, the partly cloned/updated destination
627 634 repository will be deleted.
628 635
629 636 Arguments:
630 637
631 638 source: repository object or URL
632 639
633 640 dest: URL of destination repository to create (defaults to base
634 641 name of source repository)
635 642
636 643 pull: always pull from source repository, even in local case or if the
637 644 server prefers streaming
638 645
639 646 stream: stream raw data uncompressed from repository (fast over
640 647 LAN, slow over WAN)
641 648
642 649 revs: revision to clone up to (implies pull=True)
643 650
644 651 update: update working directory after clone completes, if
645 652 destination is local repository (True means update to default rev,
646 653 anything else is treated as a revision)
647 654
648 655 branch: branches to clone
649 656
650 657 shareopts: dict of options to control auto sharing behavior. The "pool" key
651 658 activates auto sharing mode and defines the directory for stores. The
652 659 "mode" key determines how to construct the directory name of the shared
653 660 repository. "identity" means the name is derived from the node of the first
654 661 changeset in the repository. "remote" means the name is derived from the
655 662 remote's path/URL. Defaults to "identity."
656 663
657 664 storeincludepats and storeexcludepats: sets of file patterns to include and
658 665 exclude in the repository copy, respectively. If not defined, all files
659 666 will be included (a "full" clone). Otherwise a "narrow" clone containing
660 667 only the requested files will be performed. If ``storeincludepats`` is not
661 668 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
662 669 ``path:.``. If both are empty sets, no files will be cloned.
663 670 """
664 671
665 672 if isinstance(source, bytes):
666 673 src = urlutil.get_clone_path(ui, source, branch)
667 674 origsource, source, branches = src
668 675 srcpeer = peer(ui, peeropts, source)
669 676 else:
670 677 srcpeer = source.peer() # in case we were called with a localrepo
671 678 branches = (None, branch or [])
672 679 origsource = source = srcpeer.url()
673 680 srclock = destlock = destwlock = cleandir = None
674 681 destpeer = None
675 682 try:
676 683 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
677 684
678 685 if dest is None:
679 686 dest = defaultdest(source)
680 687 if dest:
681 688 ui.status(_(b"destination directory: %s\n") % dest)
682 689 else:
683 690 dest = urlutil.get_clone_path(ui, dest)[0]
684 691
685 692 dest = urlutil.urllocalpath(dest)
686 693 source = urlutil.urllocalpath(source)
687 694
688 695 if not dest:
689 696 raise error.InputError(_(b"empty destination path is not valid"))
690 697
691 698 destvfs = vfsmod.vfs(dest, expandpath=True)
692 699 if destvfs.lexists():
693 700 if not destvfs.isdir():
694 701 raise error.InputError(
695 702 _(b"destination '%s' already exists") % dest
696 703 )
697 704 elif destvfs.listdir():
698 705 raise error.InputError(
699 706 _(b"destination '%s' is not empty") % dest
700 707 )
701 708
702 709 createopts = {}
703 710 narrow = False
704 711
705 712 if storeincludepats is not None:
706 713 narrowspec.validatepatterns(storeincludepats)
707 714 narrow = True
708 715
709 716 if storeexcludepats is not None:
710 717 narrowspec.validatepatterns(storeexcludepats)
711 718 narrow = True
712 719
713 720 if narrow:
714 721 # Include everything by default if only exclusion patterns defined.
715 722 if storeexcludepats and not storeincludepats:
716 723 storeincludepats = {b'path:.'}
717 724
718 725 createopts[b'narrowfiles'] = True
719 726
720 727 if depth:
721 728 createopts[b'shallowfilestore'] = True
722 729
723 730 if srcpeer.capable(b'lfs-serve'):
724 731 # Repository creation honors the config if it disabled the extension, so
725 732 # we can't just announce that lfs will be enabled. This check avoids
726 733 # saying that lfs will be enabled, and then saying it's an unknown
727 734 # feature. The lfs creation option is set in either case so that a
728 735 # requirement is added. If the extension is explicitly disabled but the
729 736 # requirement is set, the clone aborts early, before transferring any
730 737 # data.
731 738 createopts[b'lfs'] = True
732 739
733 740 if extensions.disabled_help(b'lfs'):
734 741 ui.status(
735 742 _(
736 743 b'(remote is using large file support (lfs), but it is '
737 744 b'explicitly disabled in the local configuration)\n'
738 745 )
739 746 )
740 747 else:
741 748 ui.status(
742 749 _(
743 750 b'(remote is using large file support (lfs); lfs will '
744 751 b'be enabled for this repository)\n'
745 752 )
746 753 )
747 754
748 755 shareopts = shareopts or {}
749 756 sharepool = shareopts.get(b'pool')
750 757 sharenamemode = shareopts.get(b'mode')
751 758 if sharepool and islocal(dest):
752 759 sharepath = None
753 760 if sharenamemode == b'identity':
754 761 # Resolve the name from the initial changeset in the remote
755 762 # repository. This returns nullid when the remote is empty. It
756 763 # raises RepoLookupError if revision 0 is filtered or otherwise
757 764 # not available. If we fail to resolve, sharing is not enabled.
758 765 try:
759 766 with srcpeer.commandexecutor() as e:
760 767 rootnode = e.callcommand(
761 768 b'lookup',
762 769 {
763 770 b'key': b'0',
764 771 },
765 772 ).result()
766 773
767 774 if rootnode != sha1nodeconstants.nullid:
768 775 sharepath = os.path.join(sharepool, hex(rootnode))
769 776 else:
770 777 ui.status(
771 778 _(
772 779 b'(not using pooled storage: '
773 780 b'remote appears to be empty)\n'
774 781 )
775 782 )
776 783 except error.RepoLookupError:
777 784 ui.status(
778 785 _(
779 786 b'(not using pooled storage: '
780 787 b'unable to resolve identity of remote)\n'
781 788 )
782 789 )
783 790 elif sharenamemode == b'remote':
784 791 sharepath = os.path.join(
785 792 sharepool, hex(hashutil.sha1(source).digest())
786 793 )
787 794 else:
788 795 raise error.Abort(
789 796 _(b'unknown share naming mode: %s') % sharenamemode
790 797 )
791 798
792 799 # TODO this is a somewhat arbitrary restriction.
793 800 if narrow:
794 801 ui.status(
795 802 _(b'(pooled storage not supported for narrow clones)\n')
796 803 )
797 804 sharepath = None
798 805
799 806 if sharepath:
800 807 return clonewithshare(
801 808 ui,
802 809 peeropts,
803 810 sharepath,
804 811 source,
805 812 srcpeer,
806 813 dest,
807 814 pull=pull,
808 815 rev=revs,
809 816 update=update,
810 817 stream=stream,
811 818 )
812 819
813 820 srcrepo = srcpeer.local()
814 821
815 822 abspath = origsource
816 823 if islocal(origsource):
817 824 abspath = util.abspath(urlutil.urllocalpath(origsource))
818 825
819 826 if islocal(dest):
820 827 if os.path.exists(dest):
821 828 # only clean up directories we create ourselves
822 829 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
823 830 cleandir = hgdir
824 831 else:
825 832 cleandir = dest
826 833
827 834 copy = False
828 835 if (
829 836 srcrepo
830 837 and srcrepo.cancopy()
831 838 and islocal(dest)
832 839 and not phases.hassecret(srcrepo)
833 840 ):
834 841 copy = not pull and not revs
835 842
836 843 # TODO this is a somewhat arbitrary restriction.
837 844 if narrow:
838 845 copy = False
839 846
840 847 if copy:
841 848 try:
842 849 # we use a lock here because if we race with commit, we
843 850 # can end up with extra data in the cloned revlogs that's
844 851 # not pointed to by changesets, thus causing verify to
845 852 # fail
846 853 srclock = srcrepo.lock(wait=False)
847 854 except error.LockError:
848 855 copy = False
849 856
850 857 if copy:
851 858 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
852 859
853 860 destrootpath = urlutil.urllocalpath(dest)
854 861 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
855 862 localrepo.createrepository(
856 863 ui,
857 864 destrootpath,
858 865 requirements=dest_reqs,
859 866 )
860 867 destrepo = localrepo.makelocalrepository(ui, destrootpath)
861 868
862 869 destwlock = destrepo.wlock()
863 870 destlock = destrepo.lock()
864 871 from . import streamclone # avoid cycle
865 872
866 873 streamclone.local_copy(srcrepo, destrepo)
867 874
868 875 # we need to re-init the repo after manually copying the data
869 876 # into it
870 877 destpeer = peer(srcrepo, peeropts, dest)
871 878
872 879 # make the peer aware that is it already locked
873 880 #
874 881 # important:
875 882 #
876 883 # We still need to release that lock at the end of the function
877 884 destpeer.local()._lockref = weakref.ref(destlock)
878 885 destpeer.local()._wlockref = weakref.ref(destwlock)
879 886 # dirstate also needs to be copied because `_wlockref` has a reference
880 887 # to it: this dirstate is saved to disk when the wlock is released
881 888 destpeer.local().dirstate = destrepo.dirstate
882 889
883 890 srcrepo.hook(
884 891 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
885 892 )
886 893 else:
887 894 try:
888 895 # only pass ui when no srcrepo
889 896 destpeer = peer(
890 897 srcrepo or ui,
891 898 peeropts,
892 899 dest,
893 900 create=True,
894 901 createopts=createopts,
895 902 )
896 903 except FileExistsError:
897 904 cleandir = None
898 905 raise error.Abort(_(b"destination '%s' already exists") % dest)
899 906
900 907 if revs:
901 908 if not srcpeer.capable(b'lookup'):
902 909 raise error.Abort(
903 910 _(
904 911 b"src repository does not support "
905 912 b"revision lookup and so doesn't "
906 913 b"support clone by revision"
907 914 )
908 915 )
909 916
910 917 # TODO this is batchable.
911 918 remoterevs = []
912 919 for rev in revs:
913 920 with srcpeer.commandexecutor() as e:
914 921 remoterevs.append(
915 922 e.callcommand(
916 923 b'lookup',
917 924 {
918 925 b'key': rev,
919 926 },
920 927 ).result()
921 928 )
922 929 revs = remoterevs
923 930
924 931 checkout = revs[0]
925 932 else:
926 933 revs = None
927 934 local = destpeer.local()
928 935 if local:
929 936 if narrow:
930 937 with local.wlock(), local.lock():
931 938 local.setnarrowpats(storeincludepats, storeexcludepats)
932 939 narrowspec.copytoworkingcopy(local)
933 940
934 941 u = urlutil.url(abspath)
935 942 defaulturl = bytes(u)
936 943 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
937 944 if not stream:
938 945 if pull:
939 946 stream = False
940 947 else:
941 948 stream = None
942 949 # internal config: ui.quietbookmarkmove
943 950 overrides = {(b'ui', b'quietbookmarkmove'): True}
944 951 with local.ui.configoverride(overrides, b'clone'):
945 952 exchange.pull(
946 953 local,
947 954 srcpeer,
948 955 heads=revs,
949 956 streamclonerequested=stream,
950 957 includepats=storeincludepats,
951 958 excludepats=storeexcludepats,
952 959 depth=depth,
953 960 )
954 961 elif srcrepo:
955 962 # TODO lift restriction once exchange.push() accepts narrow
956 963 # push.
957 964 if narrow:
958 965 raise error.Abort(
959 966 _(
960 967 b'narrow clone not available for '
961 968 b'remote destinations'
962 969 )
963 970 )
964 971
965 972 exchange.push(
966 973 srcrepo,
967 974 destpeer,
968 975 revs=revs,
969 976 bookmarks=srcrepo._bookmarks.keys(),
970 977 )
971 978 else:
972 979 raise error.Abort(
973 980 _(b"clone from remote to remote not supported")
974 981 )
975 982
976 983 cleandir = None
977 984
978 985 destrepo = destpeer.local()
979 986 if destrepo:
980 987 template = uimod.samplehgrcs[b'cloned']
981 988 u = urlutil.url(abspath)
982 989 u.passwd = None
983 990 defaulturl = bytes(u)
984 991 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
985 992 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
986 993
987 994 if ui.configbool(b'experimental', b'remotenames'):
988 995 logexchange.pullremotenames(destrepo, srcpeer)
989 996
990 997 if update:
991 998 if update is not True:
992 999 with srcpeer.commandexecutor() as e:
993 1000 checkout = e.callcommand(
994 1001 b'lookup',
995 1002 {
996 1003 b'key': update,
997 1004 },
998 1005 ).result()
999 1006
1000 1007 uprev = None
1001 1008 status = None
1002 1009 if checkout is not None:
1003 1010 # Some extensions (at least hg-git and hg-subversion) have
1004 1011 # a peer.lookup() implementation that returns a name instead
1005 1012 # of a nodeid. We work around it here until we've figured
1006 1013 # out a better solution.
1007 1014 if len(checkout) == 20 and checkout in destrepo:
1008 1015 uprev = checkout
1009 1016 elif scmutil.isrevsymbol(destrepo, checkout):
1010 1017 uprev = scmutil.revsymbol(destrepo, checkout).node()
1011 1018 else:
1012 1019 if update is not True:
1013 1020 try:
1014 1021 uprev = destrepo.lookup(update)
1015 1022 except error.RepoLookupError:
1016 1023 pass
1017 1024 if uprev is None:
1018 1025 try:
1019 1026 if destrepo._activebookmark:
1020 1027 uprev = destrepo.lookup(destrepo._activebookmark)
1021 1028 update = destrepo._activebookmark
1022 1029 else:
1023 1030 uprev = destrepo._bookmarks[b'@']
1024 1031 update = b'@'
1025 1032 bn = destrepo[uprev].branch()
1026 1033 if bn == b'default':
1027 1034 status = _(b"updating to bookmark %s\n" % update)
1028 1035 else:
1029 1036 status = (
1030 1037 _(b"updating to bookmark %s on branch %s\n")
1031 1038 ) % (update, bn)
1032 1039 except KeyError:
1033 1040 try:
1034 1041 uprev = destrepo.branchtip(b'default')
1035 1042 except error.RepoLookupError:
1036 1043 uprev = destrepo.lookup(b'tip')
1037 1044 if not status:
1038 1045 bn = destrepo[uprev].branch()
1039 1046 status = _(b"updating to branch %s\n") % bn
1040 1047 destrepo.ui.status(status)
1041 1048 _update(destrepo, uprev)
1042 1049 if update in destrepo._bookmarks:
1043 1050 bookmarks.activate(destrepo, update)
1044 1051 if destlock is not None:
1045 1052 release(destlock)
1046 1053 if destwlock is not None:
1047 1054 release(destlock)
        # here is a tiny window where someone could end up writing the
        # repository before the caches are sure to be warm. This is "fine"
        # as the only "bad" outcome would be some slowness. That potential
        # slowness already affects readers.
1052 1059 with destrepo.lock():
1053 1060 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1054 1061 finally:
1055 1062 release(srclock, destlock, destwlock)
1056 1063 if cleandir is not None:
1057 1064 shutil.rmtree(cleandir, True)
1058 1065 if srcpeer is not None:
1059 1066 srcpeer.close()
1060 1067 if destpeer and destpeer.local() is None:
1061 1068 destpeer.close()
1062 1069 return srcpeer, destpeer
1063 1070
1064 1071
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge *stats* on the repo ui.

    When ``quietempty`` is true and nothing happened, print nothing.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1080 1087
1081 1088
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Deprecated: update the working directory to ``node``.

    When ``overwrite`` is set, local changes are clobbered instead of
    merged.

    Returns stats (see pydoc mercurial.merge.applyupdates).
    """
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        updatecheck=updatecheck,
        labels=[b'working copy', b'destination'],
    )
1100 1107
1101 1108
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node``.

    Returns True when unresolved file merges remain.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved
1109 1116
1110 1117
# naming conflict in clone(): `update` is also used as a local name there,
# so keep a private alias to this module-level function
_update = update
1113 1120
1114 1121
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, discarding any
    local changes.

    Returns False, since a forced update can never leave conflicts behind.
    """
    merge_stats = mergemod.clean_update(repo[node])
    assert merge_stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, merge_stats, quietempty)
    return False
1122 1129
1123 1130
# naming conflict in updatetotally(): `clean` is also a parameter there,
# so keep a private alias to this module-level function
_clean = clean
1126 1133
# the values accepted for the `updatecheck` argument of updatetotally() and
# for the `commands.update.check` configuration
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1133 1140
1134 1141
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an invalid value was passed in explicitly: that is a caller bug
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # an active bookmark to move)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # the dirty-check was already done above; do not check again
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # updating to something that is not a bookmark deactivates any
            # currently active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1213 1220
1214 1221
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with ``ctx``, resolving changes.

    Returns True when any unresolved conflicts remain.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return has_conflicts
1236 1243
1237 1244
def abortmerge(ui, repo):
    """Abort an in-progress merge and update back to a clean parent.

    With an active merge state we return to its local side; otherwise
    (nothing recorded) the current working directory parent is used.
    """
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts; return to the local side of the merge
        node = ms.localctx.hex()
    else:
        # there were no conflicts, so no mergestate was stored
        node = repo[b'.'].hex()

    short_id = node[:12]
    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % short_id)
    stats = mergemod.clean_update(repo[node])
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1251 1258
1252 1259
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed, otherwise
    the return value of subreporecurse().
    """
    # incoming only knows how to deal with a single source for now
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        # graft the subrepo path onto the pull source; an absolute subpath
        # replaces the source entirely
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # fetch the incoming changesets into a (possibly bundle-backed)
        # repository view; this replaces `cleanupfn` with one that also
        # removes the temporary bundle
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1314 1321
1315 1322
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets present in ``source`` but not in the local repo.

    Returns 0 when incoming changes were found, 1 otherwise (including
    the result of subrepo recursion).
    """

    def subreporecurse():
        # recurse into subrepos when --subrepos was given; the overall
        # result becomes 0 as soon as any subrepo has incoming changes
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # show at most --limit changesets, honoring --newest-first and
        # --no-merges
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1345 1352
1346 1353
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the changesets missing from each ``dests`` push target.

    Returns (outgoing_revs, others) where outgoing_revs is the union of
    missing nodes sorted by local revision number and others is the list
    of still-open peer objects; the caller is responsible for closing
    them.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # graft the subrepo path onto the destination; an absolute
            # subpath replaces the destination entirely
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # keep the peer open; callers use it for hooks and close it
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1389 1396
1390 1397
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on each subrepo when --subrepos was given.

    Returns the minimum of the subrepo exit codes, or 1 when no subrepo
    was visited.
    """
    ret = 1
    if not opts.get(b'subrepos'):
        return ret
    wctx = repo[None]
    for name in sorted(wctx.substate):
        ret = min(ret, wctx.sub(name).outgoing(ui, dests, opts))
    return ret
1399 1406
1400 1407
def _outgoing_filter(repo, revs, opts):
    """Apply the --limit/--no-merges/--newest-first options to ``revs``.

    ``revs`` (a list of nodes) is reversed in place when --newest-first
    is set; the surviving nodes are yielded lazily.
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: pass everything through unchanged
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        if no_merges:
            parents = [p for p in cl.parents(node) if p != repo.nullid]
            if len(parents) == 2:
                continue
        emitted += 1
        yield node
1422 1429
1423 1430
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not present in the ``dests`` push targets.

    Returns 0 when outgoing changes were found, 1 otherwise (including
    the result of subrepo recursion).
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

        if opts.get(b'graph'):
            # render the outgoing changesets as an ASCII graph
            revdag = logcmdutil.graphrevs(repo, o, opts)
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(
                ui, repo, opts, buffered=True
            )
            logcmdutil.displaygraph(
                ui, repo, revdag, displayer, graphmod.asciiedges
            )
        else:
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            for n in _outgoing_filter(repo, o, opts):
                displayer.show(repo[n])
            displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero when outgoing changes were found
    finally:
        # close every peer opened by _outgoing()
        for oth in others:
            oth.close()
1455 1462
1456 1463
def verify(repo, level=None):
    """verify the consistency of a repository

    ``level`` is forwarded to verifymod.verify to control the thoroughness
    of the check.  Subrepo references recorded in .hgsubstate files are
    checked as well.  Returns the result of verifymod.verify combined with
    the subrepo verification results.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # a subrepo verification failure must not mask an
                        # earlier one, hence the `or ret`
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1489 1496
1490 1497
def remoteui(src, opts):
    """Build a ui suitable for remote interaction from a ui or repo + opts.

    Repo-specific configuration is dropped, then only the settings that
    legitimately apply to talking to a remote (ssh command, bundle root,
    auth/security sections, proxy, CA certificates) are copied over.
    """
    if util.safehasattr(src, b'baseui'):
        # src looks like a repository: start from its global (base) ui
        dst = src.baseui.copy()
        src = src.ui  # copy target options from the repo's ui below
    else:
        # src is already a plain ui object; keep all global options
        dst = src.copy()

    # ssh transport options; command-line opts take precedence
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # bundle-specific option
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # whole sections controlling authentication and transport security
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1519 1526
1520 1527
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (repo attribute holding the directory prefix,
# file name relative to it).
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1530 1537
1531 1538
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint of the files of interest and the newest mtime seen
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # stale: instantiate a fresh repo and re-apply the cached filter
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return (fingerprint, maxmtime) for the files of interest.

        The fingerprint is a tuple of (mtime, size) pairs, one per entry
        in the module-level ``foi`` list.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # the file may not exist yet (e.g. no obsstore); fall back
                # to the containing directory so changes are still noticed
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the freshness fingerprint so both copies refresh in sync
        c._state = self._state
        c.mtime = self.mtime
        return c