##// END OF EJS Templates
hg: show the correct message when cloning an LFS repo with extension disabled...
Matt Harbison -
r50670:7d6c8943 stable
parent child Browse files
Show More
@@ -1,1602 +1,1602 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import os
11 11 import posixpath
12 12 import shutil
13 13 import stat
14 14 import weakref
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 sha1nodeconstants,
20 20 short,
21 21 )
22 22 from .pycompat import getattr
23 23
24 24 from . import (
25 25 bookmarks,
26 26 bundlerepo,
27 27 cmdutil,
28 28 destutil,
29 29 discovery,
30 30 error,
31 31 exchange,
32 32 extensions,
33 33 graphmod,
34 34 httppeer,
35 35 localrepo,
36 36 lock,
37 37 logcmdutil,
38 38 logexchange,
39 39 merge as mergemod,
40 40 mergestate as mergestatemod,
41 41 narrowspec,
42 42 phases,
43 43 requirements,
44 44 scmutil,
45 45 sshpeer,
46 46 statichttprepo,
47 47 ui as uimod,
48 48 unionrepo,
49 49 url,
50 50 util,
51 51 verify as verifymod,
52 52 vfs as vfsmod,
53 53 )
54 54 from .interfaces import repository as repositorymod
55 55 from .utils import (
56 56 hashutil,
57 57 stringutil,
58 58 urlutil,
59 59 )
60 60
61 61
# convenience alias: release one or more locks in a single call
release = lock.release

# shared features
sharedbookmarks = b'bookmarks'
66 66
67 67
def _local(path):
    """Return the repository module appropriate for a local ``path``.

    Resolves to ``bundlerepo`` when ``path`` names an existing regular
    file (a bundle), otherwise to ``localrepo``.

    Raises ``error.Abort`` for paths the OS rejects outright (e.g. an
    embedded NUL byte raises ``ValueError`` from ``os.stat``).
    """
    path = util.expandpath(urlutil.urllocalpath(path))

    try:
        # we use os.stat() directly here instead of os.path.isfile()
        # because the latter started returning `False` on invalid path
        # exceptions starting in 3.8 and we care about handling
        # invalid paths specially here.
        st = os.stat(path)
        isfile = stat.S_ISREG(st.st_mode)
    except ValueError as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
        )
    except OSError:
        isfile = False

    # conditional expression instead of the legacy `X and A or B` idiom
    # (equivalent here: both modules are truthy)
    return bundlerepo if isfile else localrepo
86 86
87 87
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names into concrete revisions for pull/clone/share.

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by
    urlutil.parseurl().  Returns ``(revs, checkout)``: the revisions to
    operate on (or None) and the revision to check out (or None).

    Raises Abort when branch lookup is needed but the peer lacks the
    ``branchmap`` capability, and RepoLookupError for an unknown branch.
    """
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branches requested: pass the revisions through unchanged
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        # best effort: treat the fragment as a plain revision
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # resolve one branch name; b'.' means the local dirstate branch
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            # append heads newest-first
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # the URL fragment may be a branch name or a raw revision id
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
130 130
131 131
# maps a URL scheme to the module (or callable returning one) used to
# open repositories/peers with that scheme; consulted by _peerlookup()
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
141 141
142 142
def _peerlookup(path):
    """Return the scheme handler able to open ``path``.

    Unknown schemes fall back to the b'file' handler.  If the handler is
    callable (e.g. ``_local``) it is invoked with ``path`` and its result
    returned; otherwise the handler module itself is returned.
    """
    u = urlutil.url(path)
    scheme = u.scheme or b'file'
    thing = schemes.get(scheme) or schemes[b'file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, b'instance'):
            raise
        return thing
155 155
156 156
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # already a repo/peer object: ask it directly
        return repo.local()
    try:
        # a scheme handler without islocal() is treated as non-local
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        return False
165 165
166 166
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    u = urlutil.url(path, parsequery=False, parsefragment=False)
    if not u.islocal():
        # remote: hand off to the url layer
        return url.open(ui, path, sendaccept=sendaccept)
    # local: plain binary file handle
    return util.posixfile(u.localpath(), b'rb')
174 174
175 175
# a list of (ui, repo) functions called for wire peer initialization
# (empty by default; presumably populated by other modules/extensions —
# NOTE(review): registration sites are outside this file)
wirepeersetupfuncs = []
178 178
179 179
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path

    Instantiates the scheme-appropriate repo/peer object, runs the
    optional ``presetupfuncs`` callbacks, then every extension's
    ``reposetup()`` hook, and finally — for non-local objects — the
    registered ``wirepeersetupfuncs``.
    """
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # the instance may carry its own (repo-level) ui; prefer it if present
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    # timing is logged per extension and in aggregate
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b' > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
206 206
207 207
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path

    Unlike _peerorrepo(), this insists on a *local* repository and
    aborts otherwise.  The returned repo is the b'visible' filtered view.
    """
    peer = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = peer.local()
    if not repo:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or peer.url())
        )
    return repo.filtered(b'visible')
231 231
232 232
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    # build a ui configured for remote interaction, then open the path
    # and hand back its peer interface
    remote_ui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        remote_ui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
239 239
240 240
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if path:
        # normpath strips any trailing separator; basename keeps the last
        # component as the destination directory name
        return os.path.basename(os.path.normpath(path))
    return b''
261 261
262 262
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share: sharedpath points at the repo's own store
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        # cached from a previous call
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    # cache for subsequent lookups
    repo.srcrepo = srcrepo
    return srcrepo
280 280
281 281
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a path (bytes) or an existing repo/peer object.
    A new repository sharing ``source``'s store is created at ``dest``
    (defaulting to the source's basename) and returned.  ``update``
    controls the working-copy checkout, ``bookmarks`` whether bookmarks
    are shared, and ``relative`` whether the share pointer is stored as
    a relative path.
    '''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # already a repo/peer object; no branch fragment to resolve
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the new repo is fully initialized after postshare()
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
328 328
329 329
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was performed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # nothing to copy from the source
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    # write the source config first and the current config last, so the
    # repo's own settings keep precedence (later hgrc entries win)
    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
350 350
351 351
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around for post-mortem, but stop using it
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
398 398
399 399
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # record the origin as the default pull/push path of the share
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
416 416
417 417
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # an explicit revision was requested; it overrides checkout
        checkout = update
    # fall through a list of candidates until one resolves
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
438 438
439 439
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # NOTE: hardlink starts as None (falsy), so the progress topic is
        # always b'copying' here; util.copyfiles() decides per-file whether
        # hardlinking is actually possible and updates the flag.
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # publishing repos don't propagate phase data
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except: # re-raises
        # make sure a partially-taken lock is released before propagating
        release(destlock)
        raise
478 478
479 479
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns a ``(srcpeer, destpeer)`` pair like clone().
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
585 585
586 586
587 587 # Recomputing caches is often slow on big repos, so copy them.
588 588 def _copycache(srcrepo, dstcachedir, fname):
589 589 """copy a cache from srcrepo to destcachedir (if it exists)"""
590 590 srcfname = srcrepo.cachevfs.join(fname)
591 591 dstfname = os.path.join(dstcachedir, fname)
592 592 if os.path.exists(srcfname):
593 593 if not os.path.exists(dstcachedir):
594 594 os.mkdir(dstcachedir)
595 595 util.copyfile(srcfname, dstfname)
596 596
597 597
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = destwlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            # BUGFIX: the scraped diff left both the old and the new version
            # of this condition in place; keep the post-fix form using
            # disabled_help(), which detects an explicitly disabled extension
            # so the correct message is shown.
            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)

            destwlock = destrepo.wlock()
            destlock = destrepo.lock()
            from . import streamclone # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)

            # make the peer aware that is it already locked
            #
            # important:
            #
            # We still need to release that lock at the end of the function
            destpeer.local()._lockref = weakref.ref(destlock)
            destpeer.local()._wlockref = weakref.ref(destwlock)
            # dirstate also needs to be copied because `_wlockref` has a reference
            # to it: this dirstate is saved to disk when the wlock is released
            destpeer.local().dirstate = destrepo.dirstate

            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except FileExistsError:
                cleandir = None
                raise error.Abort(_(b"destination '%s' already exists") % dest)

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        heads=revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # BUGFIX: interpolate *after* translation lookup;
                            # formatting inside _() defeats the catalog lookup
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                # BUGFIX: was release(destlock) — releasing the wrong lock
                # left the wlock held until the finally clause
                release(destwlock)
            # there is a tiny window where someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1061 1061
1062 1062
def _showstats(repo, stats, quietempty=False):
    """Print the file-count summary of an update/merge to the repo's ui.

    When ``quietempty`` is true and ``stats`` reports no activity at all,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1078 1078
1079 1079
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Deprecated: update the working directory to ``node``.

    With ``overwrite`` set, local changes are clobbered; otherwise they
    are merged into the destination.

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Kept for backward compatibility only; emits a deprecation warning
    # pointing callers at the merge module equivalents.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        updatecheck=updatecheck,
        labels=labels,
    )
1098 1098
1099 1099
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node

    Returns True when unresolved file merges remain.
    """
    result = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, result, quietempty)
    has_unresolved = result.unresolvedcount > 0
    if has_unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return has_unresolved
1107 1107
1108 1108
# Alias kept because clone() shadows the name `update` with a local
# variable (naming conflict in clone()).
_update = update
1111 1111
1112 1112
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes

    Always returns False: a forced update cannot leave conflicts behind.
    """
    result = mergemod.clean_update(repo[node])
    # a clean (overwriting) update never produces unresolved files
    assert result.unresolvedcount == 0
    if show_stats:
        _showstats(repo, result, quietempty)
    return False
1120 1120
1121 1121
# Alias kept because updatetotally() shadows the name `clean` with a
# parameter (naming conflict in updatetotally()).
_clean = clean

# The set of values accepted for updatetotally()'s ``updatecheck``
# argument and for the `commands.update.check` config knob.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1131 1131
1132 1132
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # A caller-supplied updatecheck (not None) bypasses the config fallback
    # above, so it must be validated here.
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one; it may also
            # tell us to move the active bookmark (movemarkfrom) and which
            # bookmark name to (re)activate afterwards (brev)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # dirty-working-directory check already done here, so the
                # actual update below no longer needs to enforce anything
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is not a bookmark: drop the active bookmark, if any
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1211 1211
1212 1212
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts remain."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return has_conflicts
1234 1234
1235 1235
def abortmerge(ui, repo):
    """Abort an in-progress merge and restore the pre-merge working copy."""
    ms = mergestatemod.mergestate.read(repo)
    # With recorded conflicts, go back to the local side of the merge;
    # otherwise no mergestate was stored and '.' is already the target.
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean (overwriting) update never leaves unresolved files
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1249 1249
1250 1250
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    srcs = list(urlutil.get_pull_paths(repo, ui, [source]))
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        raise error.Abort(msg % len(srcs))
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            u = urlutil.url(source)
            # local paths use OS separators, remote ones always use '/'
            normpath = os.path.normpath if u.islocal() else posixpath.normpath
            u.path = normpath(b'%s/%s' % (u.path, subpath))
            source = bytes(u)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may swap in a bundle-backed repo and hand back a
        # replacement cleanup callback; rebind both before anything can raise
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1312 1312
1313 1313
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``.

    Returns 0 when incoming changes were found (here or in a subrepo),
    1 otherwise.
    """

    def subreporecurse():
        # recurse into subrepos; the best (lowest) exit code wins
        ret = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for sub_path in sorted(wctx.substate):
                sub = wctx.sub(sub_path)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        shown = 0
        for n in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1343 1343
1344 1344
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Collect local nodes missing from every destination in ``dests``.

    Returns ``(outgoing_revs, others)``: the union of missing nodes sorted
    by revision number, and the list of opened peers (callers must close
    them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                u = urlutil.url(dest)
                # local paths use OS separators, remote ones always use '/'
                normpath = (
                    os.path.normpath if u.islocal() else posixpath.normpath
                )
                u.path = normpath(b'%s/%s' % (u.path, subpath))
                dest = bytes(u)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            missing = outgoing.missing
            out.update(missing)
            if not missing:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure the result is ordered by revision number
    outgoing_revs = sorted(out, key=repo.changelog.rev)
    return outgoing_revs, others
1387 1387
1388 1388
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on every subrepo; the best (lowest) exit code wins."""
    ret = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for sub_path in sorted(wctx.substate):
            sub = wctx.sub(sub_path)
            ret = min(ret, sub.outgoing(ui, dests, opts))
    return ret
1397 1397
1398 1398
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing

    Generator over the nodes in ``revs`` honoring the ``newest_first``,
    ``no_merges`` and limit options. Note: reverses ``revs`` in place when
    ``newest_first`` is set.
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: stream the nodes straight through
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        if no_merges:
            parents = [p for p in cl.parents(node) if p != repo.nullid]
            if len(parents) == 2:
                continue
        emitted += 1
        yield node
1420 1420
1421 1421
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not present in the destination(s).

    Returns 0 when outgoing changes were found (here or in a subrepo),
    1 otherwise. All peers opened by _outgoing() are closed on the way out.
    """
    use_graph = opts.get(b'graph')
    if use_graph:
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if use_graph:
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret
    finally:
        for oth in others:
            oth.close()
1453 1453
1454 1454
def verify(repo, level=None):
    """verify the consistency of a repository

    Also checks the .hgsubstate links found in visible revisions; the
    return value is non-zero when any problem was detected.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    substate_file = util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    revs = repo.revs(b"filelog(%s)", substate_file)

    if not revs:
        return ret

    repo.ui.status(_(b'checking subrepo links\n'))
    for rev in revs:
        ctx = repo[rev]
        try:
            for subpath in ctx.substate:
                try:
                    sub = ctx.sub(subpath, allowcreate=False)
                    ret = sub.verify() or ret
                except error.RepoError as e:
                    repo.ui.warn(b'%d: %s\n' % (rev, e))
        except Exception:
            # reading the substate itself blew up: treat it as corrupt
            repo.ui.warn(
                _(b'.hgsubstate is corrupt in revision %s\n')
                % short(ctx.node())
            )

    return ret
1487 1487
1488 1488
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts

    ``src`` is either a repository (its base ui is copied, dropping
    repo-specific configuration) or a plain ui object (copied as-is).
    """
    if util.safehasattr(src, b'baseui'):
        # looks like a repository: drop repo-specific config ...
        dst = src.baseui.copy()
        # ... but read the selected options from the repo-level ui below
        src = src.ui
    else:
        # assume it's a global ui object: keep all global options
        dst = src.copy()

    # copy ssh-specific options
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # copy bundle-specific options
    reporoot = src.config(b'bundle', b'mainreporoot')
    if reporoot:
        dst.setconfig(b'bundle', b'mainreporoot', reporoot, b'copied')

    # copy selected local settings to the remote ui
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for sect in sections:
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1517 1517
1518 1518
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (repo attribute holding the base directory,
# file name relative to it).
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1528 1528
1529 1529
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.
        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        self._repo = (
            fresh.filtered(self._filtername)
            if self._filtername
            else fresh.unfiltered()
        )
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ``(state, maxmtime)`` fingerprinting the files of interest.

        ``state`` is a tuple of (mtime, size) pairs, one per ``foi`` entry;
        ``maxmtime`` is the newest of those mtimes.
        """
        entries = []
        newest = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            full = os.path.join(prefix, fname)
            try:
                st = os.stat(full)
            except OSError:
                # fall back to the containing directory when the file
                # does not exist (or the path is otherwise unstattable)
                st = os.stat(prefix)
            mtime = st[stat.ST_MTIME]
            entries.append((mtime, st.st_size))
            if mtime > newest:
                newest = mtime

        return tuple(entries), newest

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        fresh = repository(self._repo.baseui, self._repo.origroot)
        fresh = (
            fresh.filtered(self._filtername)
            if self._filtername
            else fresh.unfiltered()
        )
        clone = cachedlocalrepo(fresh)
        clone._state = self._state
        clone.mtime = self.mtime
        return clone
General Comments 0
You need to be logged in to leave comments. Login now