sshpeer: enable+fix warning about sshpeers not being closed explicitly...
Valentin Gatien-Baron
r47804:a4c19a16 default
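The pattern applied in the hunks below is the same at every call site: each `hg.peer(...)` is paired with an explicit `close()`, usually via `try`/`finally`, so the develwarn enabled in `sshpeer._cleanuppipes` ("missing close on SSH connection") no longer fires. A minimal sketch of that pattern, where the helper name is hypothetical and `listkeys` merely stands in for whatever remote calls a real call site makes:

    from mercurial import hg

    def _query_remote_bookmarks(repo, opts, source):
        # Open the peer and guarantee it is closed on every exit path,
        # so the "missing close on SSH connection" develwarn cannot fire.
        other = hg.peer(repo, opts, source)
        try:
            # Any remote operations go here; listkeys is only an example.
            return other.listkeys(b'bookmarks')
        finally:
            other.close()

Where the peer has to outlive the helper on success (as in `_outgoing` below, which returns `other` to its caller), the changeset instead closes it only on the error path, using `except: other.close(); raise` rather than a `finally`.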
@@ -704,16 +704,19 b' def _pull(orig, ui, repo, source=b"defau'
704 704
705 705 if scratchbookmarks:
706 706 other = hg.peer(repo, opts, source)
707 fetchedbookmarks = other.listkeyspatterns(
708 b'bookmarks', patterns=scratchbookmarks
709 )
710 for bookmark in scratchbookmarks:
711 if bookmark not in fetchedbookmarks:
712 raise error.Abort(
713 b'remote bookmark %s not found!' % bookmark
714 )
715 scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
716 revs.append(fetchedbookmarks[bookmark])
707 try:
708 fetchedbookmarks = other.listkeyspatterns(
709 b'bookmarks', patterns=scratchbookmarks
710 )
711 for bookmark in scratchbookmarks:
712 if bookmark not in fetchedbookmarks:
713 raise error.Abort(
714 b'remote bookmark %s not found!' % bookmark
715 )
716 scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
717 revs.append(fetchedbookmarks[bookmark])
718 finally:
719 other.close()
717 720 opts[b'bookmark'] = bookmarks
718 721 opts[b'rev'] = revs
719 722
@@ -848,10 +851,13 b' def _push(orig, ui, repo, dest=None, *ar'
848 851 if common.isremotebooksenabled(ui):
849 852 if bookmark and scratchpush:
850 853 other = hg.peer(repo, opts, destpath)
851 fetchedbookmarks = other.listkeyspatterns(
852 b'bookmarks', patterns=[bookmark]
853 )
854 remotescratchbookmarks.update(fetchedbookmarks)
854 try:
855 fetchedbookmarks = other.listkeyspatterns(
856 b'bookmarks', patterns=[bookmark]
857 )
858 remotescratchbookmarks.update(fetchedbookmarks)
859 finally:
860 other.close()
855 861 _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
856 862 if oldphasemove:
857 863 exchange._localphasemove = oldphasemove
@@ -595,77 +595,83 b' def trackedcmd(ui, repo, remotepath=None'
595 595 ui.status(_(b'comparing with %s\n') % util.hidepassword(url))
596 596 remote = hg.peer(repo, opts, url)
597 597
598 # check narrow support before doing anything if widening needs to be
599 # performed. In future we should also abort if client is ellipses and
600 # server does not support ellipses
601 if widening and wireprototypes.NARROWCAP not in remote.capabilities():
602 raise error.Abort(_(b"server does not support narrow clones"))
598 try:
599 # check narrow support before doing anything if widening needs to be
600 # performed. In future we should also abort if client is ellipses and
601 # server does not support ellipses
602 if (
603 widening
604 and wireprototypes.NARROWCAP not in remote.capabilities()
605 ):
606 raise error.Abort(_(b"server does not support narrow clones"))
603 607
604 commoninc = discovery.findcommonincoming(repo, remote)
608 commoninc = discovery.findcommonincoming(repo, remote)
605 609
606 if autoremoveincludes:
607 outgoing = discovery.findcommonoutgoing(
608 repo, remote, commoninc=commoninc
609 )
610 ui.status(_(b'looking for unused includes to remove\n'))
611 localfiles = set()
612 for n in itertools.chain(outgoing.missing, outgoing.excluded):
613 localfiles.update(repo[n].files())
614 suggestedremovals = []
615 for include in sorted(oldincludes):
616 match = narrowspec.match(repo.root, [include], oldexcludes)
617 if not any(match(f) for f in localfiles):
618 suggestedremovals.append(include)
619 if suggestedremovals:
620 for s in suggestedremovals:
621 ui.status(b'%s\n' % s)
622 if (
623 ui.promptchoice(
624 _(
625 b'remove these unused includes (yn)?'
626 b'$$ &Yes $$ &No'
610 if autoremoveincludes:
611 outgoing = discovery.findcommonoutgoing(
612 repo, remote, commoninc=commoninc
613 )
614 ui.status(_(b'looking for unused includes to remove\n'))
615 localfiles = set()
616 for n in itertools.chain(outgoing.missing, outgoing.excluded):
617 localfiles.update(repo[n].files())
618 suggestedremovals = []
619 for include in sorted(oldincludes):
620 match = narrowspec.match(repo.root, [include], oldexcludes)
621 if not any(match(f) for f in localfiles):
622 suggestedremovals.append(include)
623 if suggestedremovals:
624 for s in suggestedremovals:
625 ui.status(b'%s\n' % s)
626 if (
627 ui.promptchoice(
628 _(
629 b'remove these unused includes (yn)?'
630 b'$$ &Yes $$ &No'
631 )
627 632 )
628 )
629 == 0
630 ):
631 removedincludes.update(suggestedremovals)
632 narrowing = True
633 else:
634 ui.status(_(b'found no unused includes\n'))
633 == 0
634 ):
635 removedincludes.update(suggestedremovals)
636 narrowing = True
637 else:
638 ui.status(_(b'found no unused includes\n'))
635 639
636 if narrowing:
637 newincludes = oldincludes - removedincludes
638 newexcludes = oldexcludes | addedexcludes
639 _narrow(
640 ui,
641 repo,
642 remote,
643 commoninc,
644 oldincludes,
645 oldexcludes,
646 newincludes,
647 newexcludes,
648 opts[b'force_delete_local_changes'],
649 opts[b'backup'],
650 )
651 # _narrow() updated the narrowspec and _widen() below needs to
652 # use the updated values as its base (otherwise removed includes
653 # and addedexcludes will be lost in the resulting narrowspec)
654 oldincludes = newincludes
655 oldexcludes = newexcludes
640 if narrowing:
641 newincludes = oldincludes - removedincludes
642 newexcludes = oldexcludes | addedexcludes
643 _narrow(
644 ui,
645 repo,
646 remote,
647 commoninc,
648 oldincludes,
649 oldexcludes,
650 newincludes,
651 newexcludes,
652 opts[b'force_delete_local_changes'],
653 opts[b'backup'],
654 )
655 # _narrow() updated the narrowspec and _widen() below needs to
656 # use the updated values as its base (otherwise removed includes
657 # and addedexcludes will be lost in the resulting narrowspec)
658 oldincludes = newincludes
659 oldexcludes = newexcludes
656 660
657 if widening:
658 newincludes = oldincludes | addedincludes
659 newexcludes = oldexcludes - removedexcludes
660 _widen(
661 ui,
662 repo,
663 remote,
664 commoninc,
665 oldincludes,
666 oldexcludes,
667 newincludes,
668 newexcludes,
669 )
661 if widening:
662 newincludes = oldincludes | addedincludes
663 newexcludes = oldexcludes - removedexcludes
664 _widen(
665 ui,
666 repo,
667 remote,
668 commoninc,
669 oldincludes,
670 oldexcludes,
671 newincludes,
672 newexcludes,
673 )
674 finally:
675 remote.close()
670 676
671 677 return 0
@@ -3820,132 +3820,138 b' def identify('
3820 3820 output = []
3821 3821 revs = []
3822 3822
3823 if source:
3824 source, branches = hg.parseurl(ui.expandpath(source))
3825 peer = hg.peer(repo or ui, opts, source) # only pass ui when no repo
3826 repo = peer.local()
3827 revs, checkout = hg.addbranchrevs(repo, peer, branches, None)
3828
3829 fm = ui.formatter(b'identify', opts)
3830 fm.startitem()
3831
3832 if not repo:
3833 if num or branch or tags:
3834 raise error.InputError(
3835 _(b"can't query remote revision number, branch, or tags")
3836 )
3837 if not rev and revs:
3838 rev = revs[0]
3839 if not rev:
3840 rev = b"tip"
3841
3842 remoterev = peer.lookup(rev)
3843 hexrev = fm.hexfunc(remoterev)
3844 if default or id:
3845 output = [hexrev]
3846 fm.data(id=hexrev)
3847
3848 @util.cachefunc
3849 def getbms():
3850 bms = []
3851
3852 if b'bookmarks' in peer.listkeys(b'namespaces'):
3853 hexremoterev = hex(remoterev)
3854 bms = [
3855 bm
3856 for bm, bmr in pycompat.iteritems(
3857 peer.listkeys(b'bookmarks')
3823 peer = None
3824 try:
3825 if source:
3826 source, branches = hg.parseurl(ui.expandpath(source))
3827 # only pass ui when no repo
3828 peer = hg.peer(repo or ui, opts, source)
3829 repo = peer.local()
3830 revs, checkout = hg.addbranchrevs(repo, peer, branches, None)
3831
3832 fm = ui.formatter(b'identify', opts)
3833 fm.startitem()
3834
3835 if not repo:
3836 if num or branch or tags:
3837 raise error.InputError(
3838 _(b"can't query remote revision number, branch, or tags")
3839 )
3840 if not rev and revs:
3841 rev = revs[0]
3842 if not rev:
3843 rev = b"tip"
3844
3845 remoterev = peer.lookup(rev)
3846 hexrev = fm.hexfunc(remoterev)
3847 if default or id:
3848 output = [hexrev]
3849 fm.data(id=hexrev)
3850
3851 @util.cachefunc
3852 def getbms():
3853 bms = []
3854
3855 if b'bookmarks' in peer.listkeys(b'namespaces'):
3856 hexremoterev = hex(remoterev)
3857 bms = [
3858 bm
3859 for bm, bmr in pycompat.iteritems(
3860 peer.listkeys(b'bookmarks')
3861 )
3862 if bmr == hexremoterev
3863 ]
3864
3865 return sorted(bms)
3866
3867 if fm.isplain():
3868 if bookmarks:
3869 output.extend(getbms())
3870 elif default and not ui.quiet:
3871 # multiple bookmarks for a single parent separated by '/'
3872 bm = b'/'.join(getbms())
3873 if bm:
3874 output.append(bm)
3875 else:
3876 fm.data(node=hex(remoterev))
3877 if bookmarks or b'bookmarks' in fm.datahint():
3878 fm.data(bookmarks=fm.formatlist(getbms(), name=b'bookmark'))
3879 else:
3880 if rev:
3881 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
3882 ctx = scmutil.revsingle(repo, rev, None)
3883
3884 if ctx.rev() is None:
3885 ctx = repo[None]
3886 parents = ctx.parents()
3887 taglist = []
3888 for p in parents:
3889 taglist.extend(p.tags())
3890
3891 dirty = b""
3892 if ctx.dirty(missing=True, merge=False, branch=False):
3893 dirty = b'+'
3894 fm.data(dirty=dirty)
3895
3896 hexoutput = [fm.hexfunc(p.node()) for p in parents]
3897 if default or id:
3898 output = [b"%s%s" % (b'+'.join(hexoutput), dirty)]
3899 fm.data(id=b"%s%s" % (b'+'.join(hexoutput), dirty))
3900
3901 if num:
3902 numoutput = [b"%d" % p.rev() for p in parents]
3903 output.append(b"%s%s" % (b'+'.join(numoutput), dirty))
3904
3905 fm.data(
3906 parents=fm.formatlist(
3907 [fm.hexfunc(p.node()) for p in parents], name=b'node'
3858 3908 )
3859 if bmr == hexremoterev
3860 ]
3861
3862 return sorted(bms)
3863
3864 if fm.isplain():
3865 if bookmarks:
3866 output.extend(getbms())
3867 elif default and not ui.quiet:
3909 )
3910 else:
3911 hexoutput = fm.hexfunc(ctx.node())
3912 if default or id:
3913 output = [hexoutput]
3914 fm.data(id=hexoutput)
3915
3916 if num:
3917 output.append(pycompat.bytestr(ctx.rev()))
3918 taglist = ctx.tags()
3919
3920 if default and not ui.quiet:
3921 b = ctx.branch()
3922 if b != b'default':
3923 output.append(b"(%s)" % b)
3924
3925 # multiple tags for a single parent separated by '/'
3926 t = b'/'.join(taglist)
3927 if t:
3928 output.append(t)
3929
3868 3930 # multiple bookmarks for a single parent separated by '/'
3869 bm = b'/'.join(getbms())
3931 bm = b'/'.join(ctx.bookmarks())
3870 3932 if bm:
3871 3933 output.append(bm)
3872 else:
3873 fm.data(node=hex(remoterev))
3874 if bookmarks or b'bookmarks' in fm.datahint():
3875 fm.data(bookmarks=fm.formatlist(getbms(), name=b'bookmark'))
3876 else:
3877 if rev:
3878 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
3879 ctx = scmutil.revsingle(repo, rev, None)
3880
3881 if ctx.rev() is None:
3882 ctx = repo[None]
3883 parents = ctx.parents()
3884 taglist = []
3885 for p in parents:
3886 taglist.extend(p.tags())
3887
3888 dirty = b""
3889 if ctx.dirty(missing=True, merge=False, branch=False):
3890 dirty = b'+'
3891 fm.data(dirty=dirty)
3892
3893 hexoutput = [fm.hexfunc(p.node()) for p in parents]
3894 if default or id:
3895 output = [b"%s%s" % (b'+'.join(hexoutput), dirty)]
3896 fm.data(id=b"%s%s" % (b'+'.join(hexoutput), dirty))
3897
3898 if num:
3899 numoutput = [b"%d" % p.rev() for p in parents]
3900 output.append(b"%s%s" % (b'+'.join(numoutput), dirty))
3901
3902 fm.data(
3903 parents=fm.formatlist(
3904 [fm.hexfunc(p.node()) for p in parents], name=b'node'
3905 )
3906 )
3907 else:
3908 hexoutput = fm.hexfunc(ctx.node())
3909 if default or id:
3910 output = [hexoutput]
3911 fm.data(id=hexoutput)
3912
3913 if num:
3914 output.append(pycompat.bytestr(ctx.rev()))
3915 taglist = ctx.tags()
3916
3917 if default and not ui.quiet:
3918 b = ctx.branch()
3919 if b != b'default':
3920 output.append(b"(%s)" % b)
3921
3922 # multiple tags for a single parent separated by '/'
3923 t = b'/'.join(taglist)
3924 if t:
3925 output.append(t)
3926
3927 # multiple bookmarks for a single parent separated by '/'
3928 bm = b'/'.join(ctx.bookmarks())
3929 if bm:
3930 output.append(bm)
3931 else:
3932 if branch:
3933 output.append(ctx.branch())
3934
3935 if tags:
3936 output.extend(taglist)
3937
3938 if bookmarks:
3939 output.extend(ctx.bookmarks())
3940
3941 fm.data(node=ctx.hex())
3942 fm.data(branch=ctx.branch())
3943 fm.data(tags=fm.formatlist(taglist, name=b'tag', sep=b':'))
3944 fm.data(bookmarks=fm.formatlist(ctx.bookmarks(), name=b'bookmark'))
3945 fm.context(ctx=ctx)
3946
3947 fm.plain(b"%s\n" % b' '.join(output))
3948 fm.end()
3934 else:
3935 if branch:
3936 output.append(ctx.branch())
3937
3938 if tags:
3939 output.extend(taglist)
3940
3941 if bookmarks:
3942 output.extend(ctx.bookmarks())
3943
3944 fm.data(node=ctx.hex())
3945 fm.data(branch=ctx.branch())
3946 fm.data(tags=fm.formatlist(taglist, name=b'tag', sep=b':'))
3947 fm.data(bookmarks=fm.formatlist(ctx.bookmarks(), name=b'bookmark'))
3948 fm.context(ctx=ctx)
3949
3950 fm.plain(b"%s\n" % b' '.join(output))
3951 fm.end()
3952 finally:
3953 if peer:
3954 peer.close()
3949 3955
3950 3956
3951 3957 @command(
@@ -4291,12 +4297,15 b' def incoming(ui, repo, source=b"default"'
4291 4297 ui.expandpath(source), opts.get(b'branch')
4292 4298 )
4293 4299 other = hg.peer(repo, opts, source)
4294 if b'bookmarks' not in other.listkeys(b'namespaces'):
4295 ui.warn(_(b"remote doesn't support bookmarks\n"))
4296 return 0
4297 ui.pager(b'incoming')
4298 ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
4299 return bookmarks.incoming(ui, repo, other)
4300 try:
4301 if b'bookmarks' not in other.listkeys(b'namespaces'):
4302 ui.warn(_(b"remote doesn't support bookmarks\n"))
4303 return 0
4304 ui.pager(b'incoming')
4305 ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
4306 return bookmarks.incoming(ui, repo, other)
4307 finally:
4308 other.close()
4300 4309
4301 4310 repo._subtoppath = ui.expandpath(source)
4302 4311 try:
@@ -4327,7 +4336,8 b' def init(ui, dest=b".", **opts):'
4327 4336 Returns 0 on success.
4328 4337 """
4329 4338 opts = pycompat.byteskwargs(opts)
4330 hg.peer(ui, opts, ui.expandpath(dest), create=True)
4339 peer = hg.peer(ui, opts, ui.expandpath(dest), create=True)
4340 peer.close()
4331 4341
4332 4342
4333 4343 @command(
@@ -4963,12 +4973,15 b' def outgoing(ui, repo, dest=None, **opts'
4963 4973 if opts.get(b'bookmarks'):
4964 4974 dest = path.pushloc or path.loc
4965 4975 other = hg.peer(repo, opts, dest)
4966 if b'bookmarks' not in other.listkeys(b'namespaces'):
4967 ui.warn(_(b"remote doesn't support bookmarks\n"))
4968 return 0
4969 ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
4970 ui.pager(b'outgoing')
4971 return bookmarks.outgoing(ui, repo, other)
4976 try:
4977 if b'bookmarks' not in other.listkeys(b'namespaces'):
4978 ui.warn(_(b"remote doesn't support bookmarks\n"))
4979 return 0
4980 ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
4981 ui.pager(b'outgoing')
4982 return bookmarks.outgoing(ui, repo, other)
4983 finally:
4984 other.close()
4972 4985
4973 4986 repo._subtoppath = path.pushloc or path.loc
4974 4987 try:
@@ -5679,63 +5692,67 b' def push(ui, repo, dest=None, **opts):'
5679 5692 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
5680 5693 other = hg.peer(repo, opts, dest)
5681 5694
5682 if revs:
5683 revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]
5684 if not revs:
5695 try:
5696 if revs:
5697 revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]
5698 if not revs:
5699 raise error.InputError(
5700 _(b"specified revisions evaluate to an empty set"),
5701 hint=_(b"use different revision arguments"),
5702 )
5703 elif path.pushrev:
5704 # It doesn't make any sense to specify ancestor revisions. So limit
5705 # to DAG heads to make discovery simpler.
5706 expr = revsetlang.formatspec(b'heads(%r)', path.pushrev)
5707 revs = scmutil.revrange(repo, [expr])
5708 revs = [repo[rev].node() for rev in revs]
5709 if not revs:
5710 raise error.InputError(
5711 _(b'default push revset for path evaluates to an empty set')
5712 )
5713 elif ui.configbool(b'commands', b'push.require-revs'):
5685 5714 raise error.InputError(
5686 _(b"specified revisions evaluate to an empty set"),
5687 hint=_(b"use different revision arguments"),
5715 _(b'no revisions specified to push'),
5716 hint=_(b'did you mean "hg push -r ."?'),
5688 5717 )
5689 elif path.pushrev:
5690 # It doesn't make any sense to specify ancestor revisions. So limit
5691 # to DAG heads to make discovery simpler.
5692 expr = revsetlang.formatspec(b'heads(%r)', path.pushrev)
5693 revs = scmutil.revrange(repo, [expr])
5694 revs = [repo[rev].node() for rev in revs]
5695 if not revs:
5696 raise error.InputError(
5697 _(b'default push revset for path evaluates to an empty set')
5698 )
5699 elif ui.configbool(b'commands', b'push.require-revs'):
5700 raise error.InputError(
5701 _(b'no revisions specified to push'),
5702 hint=_(b'did you mean "hg push -r ."?'),
5718
5719 repo._subtoppath = dest
5720 try:
5721 # push subrepos depth-first for coherent ordering
5722 c = repo[b'.']
5723 subs = c.substate # only repos that are committed
5724 for s in sorted(subs):
5725 result = c.sub(s).push(opts)
5726 if result == 0:
5727 return not result
5728 finally:
5729 del repo._subtoppath
5730
5731 opargs = dict(
5732 opts.get(b'opargs', {})
5733 ) # copy opargs since we may mutate it
5734 opargs.setdefault(b'pushvars', []).extend(opts.get(b'pushvars', []))
5735
5736 pushop = exchange.push(
5737 repo,
5738 other,
5739 opts.get(b'force'),
5740 revs=revs,
5741 newbranch=opts.get(b'new_branch'),
5742 bookmarks=opts.get(b'bookmark', ()),
5743 publish=opts.get(b'publish'),
5744 opargs=opargs,
5703 5745 )
5704 5746
5705 repo._subtoppath = dest
5706 try:
5707 # push subrepos depth-first for coherent ordering
5708 c = repo[b'.']
5709 subs = c.substate # only repos that are committed
5710 for s in sorted(subs):
5711 result = c.sub(s).push(opts)
5712 if result == 0:
5713 return not result
5747 result = not pushop.cgresult
5748
5749 if pushop.bkresult is not None:
5750 if pushop.bkresult == 2:
5751 result = 2
5752 elif not result and pushop.bkresult:
5753 result = 2
5714 5754 finally:
5715 del repo._subtoppath
5716
5717 opargs = dict(opts.get(b'opargs', {})) # copy opargs since we may mutate it
5718 opargs.setdefault(b'pushvars', []).extend(opts.get(b'pushvars', []))
5719
5720 pushop = exchange.push(
5721 repo,
5722 other,
5723 opts.get(b'force'),
5724 revs=revs,
5725 newbranch=opts.get(b'new_branch'),
5726 bookmarks=opts.get(b'bookmark', ()),
5727 publish=opts.get(b'publish'),
5728 opargs=opargs,
5729 )
5730
5731 result = not pushop.cgresult
5732
5733 if pushop.bkresult is not None:
5734 if pushop.bkresult == 2:
5735 result = 2
5736 elif not result and pushop.bkresult:
5737 result = 2
5738
5755 other.close()
5739 5756 return result
5740 5757
5741 5758
@@ -471,17 +471,20 b' def debugcapabilities(ui, path, **opts):'
471 471 """lists the capabilities of a remote peer"""
472 472 opts = pycompat.byteskwargs(opts)
473 473 peer = hg.peer(ui, opts, path)
474 caps = peer.capabilities()
475 ui.writenoi18n(b'Main capabilities:\n')
476 for c in sorted(caps):
477 ui.write(b' %s\n' % c)
478 b2caps = bundle2.bundle2caps(peer)
479 if b2caps:
480 ui.writenoi18n(b'Bundle2 capabilities:\n')
481 for key, values in sorted(pycompat.iteritems(b2caps)):
482 ui.write(b' %s\n' % key)
483 for v in values:
484 ui.write(b' %s\n' % v)
474 try:
475 caps = peer.capabilities()
476 ui.writenoi18n(b'Main capabilities:\n')
477 for c in sorted(caps):
478 ui.write(b' %s\n' % c)
479 b2caps = bundle2.bundle2caps(peer)
480 if b2caps:
481 ui.writenoi18n(b'Bundle2 capabilities:\n')
482 for key, values in sorted(pycompat.iteritems(b2caps)):
483 ui.write(b' %s\n' % key)
484 for v in values:
485 ui.write(b' %s\n' % v)
486 finally:
487 peer.close()
485 488
486 489
487 490 @command(
@@ -2615,12 +2618,17 b' def debugpeer(ui, path):'
2615 2618 with ui.configoverride(overrides):
2616 2619 peer = hg.peer(ui, {}, path)
2617 2620
2618 local = peer.local() is not None
2619 canpush = peer.canpush()
2620
2621 ui.write(_(b'url: %s\n') % peer.url())
2622 ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
2623 ui.write(_(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no')))
2621 try:
2622 local = peer.local() is not None
2623 canpush = peer.canpush()
2624
2625 ui.write(_(b'url: %s\n') % peer.url())
2626 ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
2627 ui.write(
2628 _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no'))
2629 )
2630 finally:
2631 peer.close()
2624 2632
2625 2633
2626 2634 @command(
@@ -2723,26 +2731,30 b' def debugpushkey(ui, repopath, namespace'
2723 2731 """
2724 2732
2725 2733 target = hg.peer(ui, {}, repopath)
2726 if keyinfo:
2727 key, old, new = keyinfo
2728 with target.commandexecutor() as e:
2729 r = e.callcommand(
2730 b'pushkey',
2731 {
2732 b'namespace': namespace,
2733 b'key': key,
2734 b'old': old,
2735 b'new': new,
2736 },
2737 ).result()
2738
2739 ui.status(pycompat.bytestr(r) + b'\n')
2740 return not r
2741 else:
2742 for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
2743 ui.write(
2744 b"%s\t%s\n" % (stringutil.escapestr(k), stringutil.escapestr(v))
2745 )
2734 try:
2735 if keyinfo:
2736 key, old, new = keyinfo
2737 with target.commandexecutor() as e:
2738 r = e.callcommand(
2739 b'pushkey',
2740 {
2741 b'namespace': namespace,
2742 b'key': key,
2743 b'old': old,
2744 b'new': new,
2745 },
2746 ).result()
2747
2748 ui.status(pycompat.bytestr(r) + b'\n')
2749 return not r
2750 else:
2751 for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
2752 ui.write(
2753 b"%s\t%s\n"
2754 % (stringutil.escapestr(k), stringutil.escapestr(v))
2755 )
2756 finally:
2757 target.close()
2746 2758
2747 2759
2748 2760 @command(b'debugpvec', [], _(b'A B'))
@@ -4095,19 +4107,22 b' def debugwhyunstable(ui, repo, rev):'
4095 4107 def debugwireargs(ui, repopath, *vals, **opts):
4096 4108 opts = pycompat.byteskwargs(opts)
4097 4109 repo = hg.peer(ui, opts, repopath)
4098 for opt in cmdutil.remoteopts:
4099 del opts[opt[1]]
4100 args = {}
4101 for k, v in pycompat.iteritems(opts):
4102 if v:
4103 args[k] = v
4104 args = pycompat.strkwargs(args)
4105 # run twice to check that we don't mess up the stream for the next command
4106 res1 = repo.debugwireargs(*vals, **args)
4107 res2 = repo.debugwireargs(*vals, **args)
4108 ui.write(b"%s\n" % res1)
4109 if res1 != res2:
4110 ui.warn(b"%s\n" % res2)
4110 try:
4111 for opt in cmdutil.remoteopts:
4112 del opts[opt[1]]
4113 args = {}
4114 for k, v in pycompat.iteritems(opts):
4115 if v:
4116 args[k] = v
4117 args = pycompat.strkwargs(args)
4118 # run twice to check that we don't mess up the stream for the next command
4119 res1 = repo.debugwireargs(*vals, **args)
4120 res2 = repo.debugwireargs(*vals, **args)
4121 ui.write(b"%s\n" % res1)
4122 if res1 != res2:
4123 ui.warn(b"%s\n" % res2)
4124 finally:
4125 repo.close()
4111 4126
4112 4127
4113 4128 def _parsewirelangblocks(fh):
@@ -678,140 +678,148 b' def clone('
678 678 srcpeer = source.peer() # in case we were called with a localrepo
679 679 branches = (None, branch or [])
680 680 origsource = source = srcpeer.url()
681 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
681 srclock = destlock = cleandir = None
682 destpeer = None
683 try:
684 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
682 685
683 if dest is None:
684 dest = defaultdest(source)
685 if dest:
686 ui.status(_(b"destination directory: %s\n") % dest)
687 else:
688 dest = ui.expandpath(dest)
686 if dest is None:
687 dest = defaultdest(source)
688 if dest:
689 ui.status(_(b"destination directory: %s\n") % dest)
690 else:
691 dest = ui.expandpath(dest)
689 692
690 dest = util.urllocalpath(dest)
691 source = util.urllocalpath(source)
693 dest = util.urllocalpath(dest)
694 source = util.urllocalpath(source)
692 695
693 if not dest:
694 raise error.InputError(_(b"empty destination path is not valid"))
696 if not dest:
697 raise error.InputError(_(b"empty destination path is not valid"))
695 698
696 destvfs = vfsmod.vfs(dest, expandpath=True)
697 if destvfs.lexists():
698 if not destvfs.isdir():
699 raise error.InputError(_(b"destination '%s' already exists") % dest)
700 elif destvfs.listdir():
701 raise error.InputError(_(b"destination '%s' is not empty") % dest)
699 destvfs = vfsmod.vfs(dest, expandpath=True)
700 if destvfs.lexists():
701 if not destvfs.isdir():
702 raise error.InputError(
703 _(b"destination '%s' already exists") % dest
704 )
705 elif destvfs.listdir():
706 raise error.InputError(
707 _(b"destination '%s' is not empty") % dest
708 )
702 709
703 createopts = {}
704 narrow = False
705
706 if storeincludepats is not None:
707 narrowspec.validatepatterns(storeincludepats)
708 narrow = True
710 createopts = {}
711 narrow = False
709 712
710 if storeexcludepats is not None:
711 narrowspec.validatepatterns(storeexcludepats)
712 narrow = True
713 if storeincludepats is not None:
714 narrowspec.validatepatterns(storeincludepats)
715 narrow = True
716
717 if storeexcludepats is not None:
718 narrowspec.validatepatterns(storeexcludepats)
719 narrow = True
713 720
714 if narrow:
715 # Include everything by default if only exclusion patterns defined.
716 if storeexcludepats and not storeincludepats:
717 storeincludepats = {b'path:.'}
721 if narrow:
722 # Include everything by default if only exclusion patterns defined.
723 if storeexcludepats and not storeincludepats:
724 storeincludepats = {b'path:.'}
718 725
719 createopts[b'narrowfiles'] = True
726 createopts[b'narrowfiles'] = True
720 727
721 if depth:
722 createopts[b'shallowfilestore'] = True
728 if depth:
729 createopts[b'shallowfilestore'] = True
723 730
724 if srcpeer.capable(b'lfs-serve'):
725 # Repository creation honors the config if it disabled the extension, so
726 # we can't just announce that lfs will be enabled. This check avoids
727 # saying that lfs will be enabled, and then saying it's an unknown
728 # feature. The lfs creation option is set in either case so that a
729 # requirement is added. If the extension is explicitly disabled but the
730 # requirement is set, the clone aborts early, before transferring any
731 # data.
732 createopts[b'lfs'] = True
731 if srcpeer.capable(b'lfs-serve'):
732 # Repository creation honors the config if it disabled the extension, so
733 # we can't just announce that lfs will be enabled. This check avoids
734 # saying that lfs will be enabled, and then saying it's an unknown
735 # feature. The lfs creation option is set in either case so that a
736 # requirement is added. If the extension is explicitly disabled but the
737 # requirement is set, the clone aborts early, before transferring any
738 # data.
739 createopts[b'lfs'] = True
733 740
734 if extensions.disabled_help(b'lfs'):
735 ui.status(
736 _(
737 b'(remote is using large file support (lfs), but it is '
738 b'explicitly disabled in the local configuration)\n'
741 if extensions.disabled_help(b'lfs'):
742 ui.status(
743 _(
744 b'(remote is using large file support (lfs), but it is '
745 b'explicitly disabled in the local configuration)\n'
746 )
739 747 )
740 )
741 else:
742 ui.status(
743 _(
744 b'(remote is using large file support (lfs); lfs will '
745 b'be enabled for this repository)\n'
748 else:
749 ui.status(
750 _(
751 b'(remote is using large file support (lfs); lfs will '
752 b'be enabled for this repository)\n'
753 )
746 754 )
747 )
748 755
749 shareopts = shareopts or {}
750 sharepool = shareopts.get(b'pool')
751 sharenamemode = shareopts.get(b'mode')
752 if sharepool and islocal(dest):
753 sharepath = None
754 if sharenamemode == b'identity':
755 # Resolve the name from the initial changeset in the remote
756 # repository. This returns nullid when the remote is empty. It
757 # raises RepoLookupError if revision 0 is filtered or otherwise
758 # not available. If we fail to resolve, sharing is not enabled.
759 try:
760 with srcpeer.commandexecutor() as e:
761 rootnode = e.callcommand(
762 b'lookup',
763 {
764 b'key': b'0',
765 },
766 ).result()
756 shareopts = shareopts or {}
757 sharepool = shareopts.get(b'pool')
758 sharenamemode = shareopts.get(b'mode')
759 if sharepool and islocal(dest):
760 sharepath = None
761 if sharenamemode == b'identity':
762 # Resolve the name from the initial changeset in the remote
763 # repository. This returns nullid when the remote is empty. It
764 # raises RepoLookupError if revision 0 is filtered or otherwise
765 # not available. If we fail to resolve, sharing is not enabled.
766 try:
767 with srcpeer.commandexecutor() as e:
768 rootnode = e.callcommand(
769 b'lookup',
770 {
771 b'key': b'0',
772 },
773 ).result()
767 774
768 if rootnode != nullid:
769 sharepath = os.path.join(sharepool, hex(rootnode))
770 else:
775 if rootnode != nullid:
776 sharepath = os.path.join(sharepool, hex(rootnode))
777 else:
778 ui.status(
779 _(
780 b'(not using pooled storage: '
781 b'remote appears to be empty)\n'
782 )
783 )
784 except error.RepoLookupError:
771 785 ui.status(
772 786 _(
773 787 b'(not using pooled storage: '
774 b'remote appears to be empty)\n'
788 b'unable to resolve identity of remote)\n'
775 789 )
776 790 )
777 except error.RepoLookupError:
778 ui.status(
779 _(
780 b'(not using pooled storage: '
781 b'unable to resolve identity of remote)\n'
782 )
791 elif sharenamemode == b'remote':
792 sharepath = os.path.join(
793 sharepool, hex(hashutil.sha1(source).digest())
794 )
795 else:
796 raise error.Abort(
797 _(b'unknown share naming mode: %s') % sharenamemode
783 798 )
784 elif sharenamemode == b'remote':
785 sharepath = os.path.join(
786 sharepool, hex(hashutil.sha1(source).digest())
787 )
788 else:
789 raise error.Abort(
790 _(b'unknown share naming mode: %s') % sharenamemode
791 )
799
800 # TODO this is a somewhat arbitrary restriction.
801 if narrow:
802 ui.status(
803 _(b'(pooled storage not supported for narrow clones)\n')
804 )
805 sharepath = None
792 806
793 # TODO this is a somewhat arbitrary restriction.
794 if narrow:
795 ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
796 sharepath = None
807 if sharepath:
808 return clonewithshare(
809 ui,
810 peeropts,
811 sharepath,
812 source,
813 srcpeer,
814 dest,
815 pull=pull,
816 rev=revs,
817 update=update,
818 stream=stream,
819 )
797 820
798 if sharepath:
799 return clonewithshare(
800 ui,
801 peeropts,
802 sharepath,
803 source,
804 srcpeer,
805 dest,
806 pull=pull,
807 rev=revs,
808 update=update,
809 stream=stream,
810 )
821 srcrepo = srcpeer.local()
811 822
812 srclock = destlock = cleandir = None
813 srcrepo = srcpeer.local()
814 try:
815 823 abspath = origsource
816 824 if islocal(origsource):
817 825 abspath = os.path.abspath(util.urllocalpath(origsource))
@@ -1052,6 +1060,8 b' def clone('
1052 1060 shutil.rmtree(cleandir, True)
1053 1061 if srcpeer is not None:
1054 1062 srcpeer.close()
1063 if destpeer and destpeer.local() is None:
1064 destpeer.close()
1055 1065 return srcpeer, destpeer
1056 1066
1057 1067
@@ -1253,15 +1263,17 b' def _incoming('
1253 1263 """
1254 1264 source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
1255 1265 other = peer(repo, opts, source)
1256 ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
1257 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1266 cleanupfn = other.close
1267 try:
1268 ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
1269 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1258 1270
1259 if revs:
1260 revs = [other.lookup(rev) for rev in revs]
1261 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1262 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
1263 )
1264 try:
1271 if revs:
1272 revs = [other.lookup(rev) for rev in revs]
1273 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1274 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
1275 )
1276
1265 1277 if not chlist:
1266 1278 ui.status(_(b"no changes found\n"))
1267 1279 return subreporecurse()
@@ -1320,13 +1332,17 b' def _outgoing(ui, repo, dest, opts):'
1320 1332 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1321 1333
1322 1334 other = peer(repo, opts, dest)
1323 outgoing = discovery.findcommonoutgoing(
1324 repo, other, revs, force=opts.get(b'force')
1325 )
1326 o = outgoing.missing
1327 if not o:
1328 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1329 return o, other
1335 try:
1336 outgoing = discovery.findcommonoutgoing(
1337 repo, other, revs, force=opts.get(b'force')
1338 )
1339 o = outgoing.missing
1340 if not o:
1341 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1342 return o, other
1343 except: # re-raises
1344 other.close()
1345 raise
1330 1346
1331 1347
1332 1348 def outgoing(ui, repo, dest, opts):
@@ -1341,27 +1357,30 b' def outgoing(ui, repo, dest, opts):'
1341 1357
1342 1358 limit = logcmdutil.getlimit(opts)
1343 1359 o, other = _outgoing(ui, repo, dest, opts)
1344 if not o:
1345 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1346 return recurse()
1360 try:
1361 if not o:
1362 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1363 return recurse()
1347 1364
1348 if opts.get(b'newest_first'):
1349 o.reverse()
1350 ui.pager(b'outgoing')
1351 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1352 count = 0
1353 for n in o:
1354 if limit is not None and count >= limit:
1355 break
1356 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1357 if opts.get(b'no_merges') and len(parents) == 2:
1358 continue
1359 count += 1
1360 displayer.show(repo[n])
1361 displayer.close()
1362 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1363 recurse()
1364 return 0 # exit code is zero since we found outgoing changes
1365 if opts.get(b'newest_first'):
1366 o.reverse()
1367 ui.pager(b'outgoing')
1368 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1369 count = 0
1370 for n in o:
1371 if limit is not None and count >= limit:
1372 break
1373 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1374 if opts.get(b'no_merges') and len(parents) == 2:
1375 continue
1376 count += 1
1377 displayer.show(repo[n])
1378 displayer.close()
1379 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1380 recurse()
1381 return 0 # exit code is zero since we found outgoing changes
1382 finally:
1383 other.close()
1365 1384
1366 1385
1367 1386 def verify(repo, level=None):
@@ -1841,9 +1841,12 b' def outgoing(repo, subset, x):'
1841 1841 if revs:
1842 1842 revs = [repo.lookup(rev) for rev in revs]
1843 1843 other = hg.peer(repo, {}, dest)
1844 repo.ui.pushbuffer()
1845 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1846 repo.ui.popbuffer()
1844 try:
1845 repo.ui.pushbuffer()
1846 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1847 repo.ui.popbuffer()
1848 finally:
1849 other.close()
1847 1850 cl = repo.changelog
1848 1851 o = {cl.rev(r) for r in outgoing.missing}
1849 1852 return subset & o
@@ -175,10 +175,7 b' def _cleanuppipes(ui, pipei, pipeo, pipe'
175 175 # to deadlocks due to a peer get gc'ed in a fork
176 176 # We add our own stack trace, because the stacktrace when called
177 177 # from __del__ is useless.
178 if False: # enabled in next commit
179 ui.develwarn(
180 b'missing close on SSH connection created at:\n%s' % warn
181 )
178 ui.develwarn(b'missing close on SSH connection created at:\n%s' % warn)
182 179
183 180
184 181 def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
@@ -716,13 +716,17 b' class hgsubrepo(abstractsubrepo):'
716 716 _(b'sharing subrepo %s from %s\n')
717 717 % (subrelpath(self), srcurl)
718 718 )
719 shared = hg.share(
720 self._repo._subparent.baseui,
721 getpeer(),
722 self._repo.root,
723 update=False,
724 bookmarks=False,
725 )
719 peer = getpeer()
720 try:
721 shared = hg.share(
722 self._repo._subparent.baseui,
723 peer,
724 self._repo.root,
725 update=False,
726 bookmarks=False,
727 )
728 finally:
729 peer.close()
726 730 self._repo = shared.local()
727 731 else:
728 732 # TODO: find a common place for this and this code in the
@@ -743,14 +747,18 b' class hgsubrepo(abstractsubrepo):'
743 747 _(b'cloning subrepo %s from %s\n')
744 748 % (subrelpath(self), util.hidepassword(srcurl))
745 749 )
746 other, cloned = hg.clone(
747 self._repo._subparent.baseui,
748 {},
749 getpeer(),
750 self._repo.root,
751 update=False,
752 shareopts=shareopts,
753 )
750 peer = getpeer()
751 try:
752 other, cloned = hg.clone(
753 self._repo._subparent.baseui,
754 {},
755 peer,
756 self._repo.root,
757 update=False,
758 shareopts=shareopts,
759 )
760 finally:
761 peer.close()
754 762 self._repo = cloned.local()
755 763 self._initrepo(parentrepo, source, create=True)
756 764 self._cachestorehash(srcurl)
@@ -760,7 +768,11 b' class hgsubrepo(abstractsubrepo):'
760 768 % (subrelpath(self), util.hidepassword(srcurl))
761 769 )
762 770 cleansub = self.storeclean(srcurl)
763 exchange.pull(self._repo, getpeer())
771 peer = getpeer()
772 try:
773 exchange.pull(self._repo, peer)
774 finally:
775 peer.close()
764 776 if cleansub:
765 777 # keep the repo clean after pull
766 778 self._cachestorehash(srcurl)
@@ -845,7 +857,10 b' class hgsubrepo(abstractsubrepo):'
845 857 % (subrelpath(self), util.hidepassword(dsturl))
846 858 )
847 859 other = hg.peer(self._repo, {b'ssh': ssh}, dsturl)
848 res = exchange.push(self._repo, other, force, newbranch=newbranch)
860 try:
861 res = exchange.push(self._repo, other, force, newbranch=newbranch)
862 finally:
863 other.close()
849 864
850 865 # the repo is now clean
851 866 self._cachestorehash(dsturl)
@@ -21,7 +21,10 b' def getflogheads(ui, repo, path):'
21 21 dest = repo.ui.expandpath(b'default')
22 22 peer = hg.peer(repo, {}, dest)
23 23
24 flogheads = peer.x_rfl_getflogheads(path)
24 try:
25 flogheads = peer.x_rfl_getflogheads(path)
26 finally:
27 peer.close()
25 28
26 29 if flogheads:
27 30 for head in flogheads:
@@ -361,6 +361,7 b' Empty [acl.allow]'
361 361 bundle2-input-bundle: 5 parts total
362 362 transaction abort!
363 363 rollback completed
364 truncating cache/rbc-revs-v1 to 8
364 365 abort: acl: user "fred" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
365 366 no rollback information available
366 367 0:6675d58eff77
@@ -808,7 +809,6 b' fred is not blocked from moving bookmark'
808 809 acl: acl.deny.bookmarks not enabled
809 810 acl: bookmark access granted: "ef1ea85a6374b77d6da9dcda9541f498f2d17df7" on bookmark "moving-bookmark"
810 811 bundle2-input-bundle: 7 parts total
811 truncating cache/rbc-revs-v1 to 8
812 812 updating the branch cache
813 813 invalid branch cache (served.hidden): tip differs
814 814 added 1 changesets with 1 changes to 1 files
@@ -900,6 +900,7 b' fred is not allowed to move bookmarks'
900 900 bundle2-input-bundle: 7 parts total
901 901 transaction abort!
902 902 rollback completed
903 truncating cache/rbc-revs-v1 to 8
903 904 abort: acl: user "fred" denied on bookmark "moving-bookmark" (changeset "ef1ea85a6374b77d6da9dcda9541f498f2d17df7")
904 905 no rollback information available
905 906 0:6675d58eff77
@@ -985,7 +986,6 b' barney is allowed everywhere'
985 986 bundle2-input-part: "phase-heads" supported
986 987 bundle2-input-part: total payload size 24
987 988 bundle2-input-bundle: 5 parts total
988 truncating cache/rbc-revs-v1 to 8
989 989 updating the branch cache
990 990 added 3 changesets with 3 changes to 3 files
991 991 bundle2-output-bundle: "HG20", 1 parts total
@@ -1073,6 +1073,7 b' wilma can change files with a .txt exten'
1073 1073 bundle2-input-bundle: 5 parts total
1074 1074 transaction abort!
1075 1075 rollback completed
1076 truncating cache/rbc-revs-v1 to 8
1076 1077 abort: acl: user "wilma" not allowed on "quux/file.py" (changeset "911600dab2ae")
1077 1078 no rollback information available
1078 1079 0:6675d58eff77
@@ -1322,7 +1323,6 b' acl.config can set only [acl.allow]/[acl'
1322 1323 bundle2-input-part: "phase-heads" supported
1323 1324 bundle2-input-part: total payload size 24
1324 1325 bundle2-input-bundle: 5 parts total
1325 truncating cache/rbc-revs-v1 to 8
1326 1326 updating the branch cache
1327 1327 added 3 changesets with 3 changes to 3 files
1328 1328 bundle2-output-bundle: "HG20", 1 parts total
@@ -1499,6 +1499,7 b' no one is allowed inside foo/Bar/'
1499 1499 bundle2-input-bundle: 5 parts total
1500 1500 transaction abort!
1501 1501 rollback completed
1502 truncating cache/rbc-revs-v1 to 8
1502 1503 abort: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
1503 1504 no rollback information available
1504 1505 0:6675d58eff77
@@ -1583,7 +1584,6 b' OS-level groups'
1583 1584 bundle2-input-part: "phase-heads" supported
1584 1585 bundle2-input-part: total payload size 24
1585 1586 bundle2-input-bundle: 5 parts total
1586 truncating cache/rbc-revs-v1 to 8
1587 1587 updating the branch cache
1588 1588 added 3 changesets with 3 changes to 3 files
1589 1589 bundle2-output-bundle: "HG20", 1 parts total
@@ -1671,6 +1671,7 b' OS-level groups'
1671 1671 bundle2-input-bundle: 5 parts total
1672 1672 transaction abort!
1673 1673 rollback completed
1674 truncating cache/rbc-revs-v1 to 8
1674 1675 abort: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
1675 1676 no rollback information available
1676 1677 0:6675d58eff77
@@ -382,6 +382,7 b' test http authentication'
382 382 devel-peer-request: 16 bytes of commands arguments in headers
383 383 devel-peer-request: finished in *.???? seconds (200) (glob)
384 384 received listkey for "phases": 15 bytes
385 (sent 9 HTTP requests and 3898 bytes; received 920 bytes in responses)
385 386 $ hg rollback -q
386 387
387 388 $ sed 's/.*] "/"/' < ../access.log
@@ -462,6 +462,7 b' lfs content, and the extension enabled.'
462 462 remote: adding manifests
463 463 remote: adding file changes
464 464 remote: added 1 changesets with 1 changes to 1 files
465 (sent 8 HTTP requests and 3526 bytes; received 961 bytes in responses) (?)
465 466 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
466 467 .hg/requires:lfs
467 468 $TESTTMP/server/.hg/requires:lfs