sshpeer: enable+fix warning about sshpeers not being closed explicitly...

Author: Valentin Gatien-Baron
Changeset: r47410:ebfa20e6 (default, draft)
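Every hunk below applies the same basic pattern: a peer obtained from hg.peer() (or a peer-returning helper) is now released with an explicit close(), usually in a try/finally, so that the develop-warning enabled in sshpeer's _cleanuppipes() further down cannot fire. A minimal sketch of that pattern with a hypothetical caller (_query_remote and its arguments are illustrative, not part of the patch):

from mercurial import hg


def _query_remote(ui, repo, opts, source):
    # Illustrative helper; only hg.peer(), listkeys() and close() mirror
    # calls that actually appear in the patch.
    other = hg.peer(repo, opts, source)
    try:
        # ... any wire-protocol work with the peer goes here ...
        return other.listkeys(b'namespaces')
    finally:
        # Always close the peer, even if the work above raised.
        other.close()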
@@ -704,16 +704,19 @@ def _pull(orig, ui, repo, source=b"defau

     if scratchbookmarks:
         other = hg.peer(repo, opts, source)
-        fetchedbookmarks = other.listkeyspatterns(
-            b'bookmarks', patterns=scratchbookmarks
-        )
-        for bookmark in scratchbookmarks:
-            if bookmark not in fetchedbookmarks:
-                raise error.Abort(
-                    b'remote bookmark %s not found!' % bookmark
-                )
-            scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
-            revs.append(fetchedbookmarks[bookmark])
+        try:
+            fetchedbookmarks = other.listkeyspatterns(
+                b'bookmarks', patterns=scratchbookmarks
+            )
+            for bookmark in scratchbookmarks:
+                if bookmark not in fetchedbookmarks:
+                    raise error.Abort(
+                        b'remote bookmark %s not found!' % bookmark
+                    )
+                scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
+                revs.append(fetchedbookmarks[bookmark])
+        finally:
+            other.close()
     opts[b'bookmark'] = bookmarks
     opts[b'rev'] = revs

@@ -848,10 +851,13 @@ def _push(orig, ui, repo, dest=None, *ar
         if common.isremotebooksenabled(ui):
             if bookmark and scratchpush:
                 other = hg.peer(repo, opts, destpath)
-                fetchedbookmarks = other.listkeyspatterns(
-                    b'bookmarks', patterns=[bookmark]
-                )
-                remotescratchbookmarks.update(fetchedbookmarks)
+                try:
+                    fetchedbookmarks = other.listkeyspatterns(
+                        b'bookmarks', patterns=[bookmark]
+                    )
+                    remotescratchbookmarks.update(fetchedbookmarks)
+                finally:
+                    other.close()
             _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
     if oldphasemove:
         exchange._localphasemove = oldphasemove
@@ -595,77 +595,83 @@ def trackedcmd(ui, repo, remotepath=None
     ui.status(_(b'comparing with %s\n') % util.hidepassword(url))
     remote = hg.peer(repo, opts, url)

-    # check narrow support before doing anything if widening needs to be
-    # performed. In future we should also abort if client is ellipses and
-    # server does not support ellipses
-    if widening and wireprototypes.NARROWCAP not in remote.capabilities():
-        raise error.Abort(_(b"server does not support narrow clones"))
+    try:
+        # check narrow support before doing anything if widening needs to be
+        # performed. In future we should also abort if client is ellipses and
+        # server does not support ellipses
+        if (
+            widening
+            and wireprototypes.NARROWCAP not in remote.capabilities()
+        ):
+            raise error.Abort(_(b"server does not support narrow clones"))

-    commoninc = discovery.findcommonincoming(repo, remote)
+        commoninc = discovery.findcommonincoming(repo, remote)

-    if autoremoveincludes:
-        outgoing = discovery.findcommonoutgoing(
-            repo, remote, commoninc=commoninc
-        )
-        ui.status(_(b'looking for unused includes to remove\n'))
-        localfiles = set()
-        for n in itertools.chain(outgoing.missing, outgoing.excluded):
-            localfiles.update(repo[n].files())
-        suggestedremovals = []
-        for include in sorted(oldincludes):
-            match = narrowspec.match(repo.root, [include], oldexcludes)
-            if not any(match(f) for f in localfiles):
-                suggestedremovals.append(include)
-        if suggestedremovals:
-            for s in suggestedremovals:
-                ui.status(b'%s\n' % s)
-            if (
-                ui.promptchoice(
-                    _(
-                        b'remove these unused includes (yn)?'
-                        b'$$ &Yes $$ &No'
-                    )
-                )
-                == 0
-            ):
-                removedincludes.update(suggestedremovals)
-                narrowing = True
-        else:
-            ui.status(_(b'found no unused includes\n'))
+        if autoremoveincludes:
+            outgoing = discovery.findcommonoutgoing(
+                repo, remote, commoninc=commoninc
+            )
+            ui.status(_(b'looking for unused includes to remove\n'))
+            localfiles = set()
+            for n in itertools.chain(outgoing.missing, outgoing.excluded):
+                localfiles.update(repo[n].files())
+            suggestedremovals = []
+            for include in sorted(oldincludes):
+                match = narrowspec.match(repo.root, [include], oldexcludes)
+                if not any(match(f) for f in localfiles):
+                    suggestedremovals.append(include)
+            if suggestedremovals:
+                for s in suggestedremovals:
+                    ui.status(b'%s\n' % s)
+                if (
+                    ui.promptchoice(
+                        _(
+                            b'remove these unused includes (yn)?'
+                            b'$$ &Yes $$ &No'
+                        )
+                    )
+                    == 0
+                ):
+                    removedincludes.update(suggestedremovals)
+                    narrowing = True
+            else:
+                ui.status(_(b'found no unused includes\n'))

-    if narrowing:
-        newincludes = oldincludes - removedincludes
-        newexcludes = oldexcludes | addedexcludes
-        _narrow(
-            ui,
-            repo,
-            remote,
-            commoninc,
-            oldincludes,
-            oldexcludes,
-            newincludes,
-            newexcludes,
-            opts[b'force_delete_local_changes'],
-            opts[b'backup'],
-        )
-        # _narrow() updated the narrowspec and _widen() below needs to
-        # use the updated values as its base (otherwise removed includes
-        # and addedexcludes will be lost in the resulting narrowspec)
-        oldincludes = newincludes
-        oldexcludes = newexcludes
+        if narrowing:
+            newincludes = oldincludes - removedincludes
+            newexcludes = oldexcludes | addedexcludes
+            _narrow(
+                ui,
+                repo,
+                remote,
+                commoninc,
+                oldincludes,
+                oldexcludes,
+                newincludes,
+                newexcludes,
+                opts[b'force_delete_local_changes'],
+                opts[b'backup'],
+            )
+            # _narrow() updated the narrowspec and _widen() below needs to
+            # use the updated values as its base (otherwise removed includes
+            # and addedexcludes will be lost in the resulting narrowspec)
+            oldincludes = newincludes
+            oldexcludes = newexcludes

-    if widening:
-        newincludes = oldincludes | addedincludes
-        newexcludes = oldexcludes - removedexcludes
-        _widen(
-            ui,
-            repo,
-            remote,
-            commoninc,
-            oldincludes,
-            oldexcludes,
-            newincludes,
-            newexcludes,
-        )
+        if widening:
+            newincludes = oldincludes | addedincludes
+            newexcludes = oldexcludes - removedexcludes
+            _widen(
+                ui,
+                repo,
+                remote,
+                commoninc,
+                oldincludes,
+                oldexcludes,
+                newincludes,
+                newexcludes,
+            )
+    finally:
+        remote.close()

     return 0
@@ -3820,132 +3820,138 @@ def identify(
     output = []
     revs = []

-    if source:
-        source, branches = hg.parseurl(ui.expandpath(source))
-        peer = hg.peer(repo or ui, opts, source)  # only pass ui when no repo
-        repo = peer.local()
-        revs, checkout = hg.addbranchrevs(repo, peer, branches, None)
-
-    fm = ui.formatter(b'identify', opts)
-    fm.startitem()
-
-    if not repo:
-        if num or branch or tags:
-            raise error.InputError(
-                _(b"can't query remote revision number, branch, or tags")
-            )
-        if not rev and revs:
-            rev = revs[0]
-        if not rev:
-            rev = b"tip"
-
-        remoterev = peer.lookup(rev)
-        hexrev = fm.hexfunc(remoterev)
-        if default or id:
-            output = [hexrev]
-        fm.data(id=hexrev)
-
-        @util.cachefunc
-        def getbms():
-            bms = []
-
-            if b'bookmarks' in peer.listkeys(b'namespaces'):
-                hexremoterev = hex(remoterev)
-                bms = [
-                    bm
-                    for bm, bmr in pycompat.iteritems(
-                        peer.listkeys(b'bookmarks')
-                    )
-                    if bmr == hexremoterev
-                ]
-
-            return sorted(bms)
-
-        if fm.isplain():
-            if bookmarks:
-                output.extend(getbms())
-            elif default and not ui.quiet:
-                # multiple bookmarks for a single parent separated by '/'
-                bm = b'/'.join(getbms())
-                if bm:
-                    output.append(bm)
-        else:
-            fm.data(node=hex(remoterev))
-            if bookmarks or b'bookmarks' in fm.datahint():
-                fm.data(bookmarks=fm.formatlist(getbms(), name=b'bookmark'))
-    else:
-        if rev:
-            repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
-        ctx = scmutil.revsingle(repo, rev, None)
-
-        if ctx.rev() is None:
-            ctx = repo[None]
-            parents = ctx.parents()
-            taglist = []
-            for p in parents:
-                taglist.extend(p.tags())
-
-            dirty = b""
-            if ctx.dirty(missing=True, merge=False, branch=False):
-                dirty = b'+'
-            fm.data(dirty=dirty)
-
-            hexoutput = [fm.hexfunc(p.node()) for p in parents]
-            if default or id:
-                output = [b"%s%s" % (b'+'.join(hexoutput), dirty)]
-            fm.data(id=b"%s%s" % (b'+'.join(hexoutput), dirty))
-
-            if num:
-                numoutput = [b"%d" % p.rev() for p in parents]
-                output.append(b"%s%s" % (b'+'.join(numoutput), dirty))
-
-            fm.data(
-                parents=fm.formatlist(
-                    [fm.hexfunc(p.node()) for p in parents], name=b'node'
-                )
-            )
-        else:
-            hexoutput = fm.hexfunc(ctx.node())
-            if default or id:
-                output = [hexoutput]
-                fm.data(id=hexoutput)
-
-            if num:
-                output.append(pycompat.bytestr(ctx.rev()))
-            taglist = ctx.tags()
-
-        if default and not ui.quiet:
-            b = ctx.branch()
-            if b != b'default':
-                output.append(b"(%s)" % b)
-
-            # multiple tags for a single parent separated by '/'
-            t = b'/'.join(taglist)
-            if t:
-                output.append(t)
-
-            # multiple bookmarks for a single parent separated by '/'
-            bm = b'/'.join(ctx.bookmarks())
-            if bm:
-                output.append(bm)
-        else:
-            if branch:
-                output.append(ctx.branch())
-
-            if tags:
-                output.extend(taglist)
-
-            if bookmarks:
-                output.extend(ctx.bookmarks())
-
-        fm.data(node=ctx.hex())
-        fm.data(branch=ctx.branch())
-        fm.data(tags=fm.formatlist(taglist, name=b'tag', sep=b':'))
-        fm.data(bookmarks=fm.formatlist(ctx.bookmarks(), name=b'bookmark'))
-        fm.context(ctx=ctx)
-
-    fm.plain(b"%s\n" % b' '.join(output))
-    fm.end()
+    peer = None
+    try:
+        if source:
+            source, branches = hg.parseurl(ui.expandpath(source))
+            # only pass ui when no repo
+            peer = hg.peer(repo or ui, opts, source)
+            repo = peer.local()
+            revs, checkout = hg.addbranchrevs(repo, peer, branches, None)
+
+        fm = ui.formatter(b'identify', opts)
+        fm.startitem()
+
+        if not repo:
+            if num or branch or tags:
+                raise error.InputError(
+                    _(b"can't query remote revision number, branch, or tags")
+                )
+            if not rev and revs:
+                rev = revs[0]
+            if not rev:
+                rev = b"tip"
+
+            remoterev = peer.lookup(rev)
+            hexrev = fm.hexfunc(remoterev)
+            if default or id:
+                output = [hexrev]
+            fm.data(id=hexrev)
+
+            @util.cachefunc
+            def getbms():
+                bms = []
+
+                if b'bookmarks' in peer.listkeys(b'namespaces'):
+                    hexremoterev = hex(remoterev)
+                    bms = [
+                        bm
+                        for bm, bmr in pycompat.iteritems(
+                            peer.listkeys(b'bookmarks')
+                        )
+                        if bmr == hexremoterev
+                    ]
+
+                return sorted(bms)
+
+            if fm.isplain():
+                if bookmarks:
+                    output.extend(getbms())
+                elif default and not ui.quiet:
+                    # multiple bookmarks for a single parent separated by '/'
+                    bm = b'/'.join(getbms())
+                    if bm:
+                        output.append(bm)
+            else:
+                fm.data(node=hex(remoterev))
+                if bookmarks or b'bookmarks' in fm.datahint():
+                    fm.data(bookmarks=fm.formatlist(getbms(), name=b'bookmark'))
+        else:
+            if rev:
+                repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
+            ctx = scmutil.revsingle(repo, rev, None)
+
+            if ctx.rev() is None:
+                ctx = repo[None]
+                parents = ctx.parents()
+                taglist = []
+                for p in parents:
+                    taglist.extend(p.tags())
+
+                dirty = b""
+                if ctx.dirty(missing=True, merge=False, branch=False):
+                    dirty = b'+'
+                fm.data(dirty=dirty)
+
+                hexoutput = [fm.hexfunc(p.node()) for p in parents]
+                if default or id:
+                    output = [b"%s%s" % (b'+'.join(hexoutput), dirty)]
+                fm.data(id=b"%s%s" % (b'+'.join(hexoutput), dirty))
+
+                if num:
+                    numoutput = [b"%d" % p.rev() for p in parents]
+                    output.append(b"%s%s" % (b'+'.join(numoutput), dirty))
+
+                fm.data(
+                    parents=fm.formatlist(
+                        [fm.hexfunc(p.node()) for p in parents], name=b'node'
+                    )
+                )
+            else:
+                hexoutput = fm.hexfunc(ctx.node())
+                if default or id:
+                    output = [hexoutput]
+                    fm.data(id=hexoutput)
+
+                if num:
+                    output.append(pycompat.bytestr(ctx.rev()))
+                taglist = ctx.tags()
+
+            if default and not ui.quiet:
+                b = ctx.branch()
+                if b != b'default':
+                    output.append(b"(%s)" % b)
+
+                # multiple tags for a single parent separated by '/'
+                t = b'/'.join(taglist)
+                if t:
+                    output.append(t)
+
+                # multiple bookmarks for a single parent separated by '/'
+                bm = b'/'.join(ctx.bookmarks())
+                if bm:
+                    output.append(bm)
+            else:
+                if branch:
+                    output.append(ctx.branch())
+
+                if tags:
+                    output.extend(taglist)
+
+                if bookmarks:
+                    output.extend(ctx.bookmarks())
+
+            fm.data(node=ctx.hex())
+            fm.data(branch=ctx.branch())
+            fm.data(tags=fm.formatlist(taglist, name=b'tag', sep=b':'))
+            fm.data(bookmarks=fm.formatlist(ctx.bookmarks(), name=b'bookmark'))
+            fm.context(ctx=ctx)
+
+        fm.plain(b"%s\n" % b' '.join(output))
+        fm.end()
+    finally:
+        if peer:
+            peer.close()


 @command(
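identify() above only creates a peer when a source is given, so the patch initializes peer to None before the try and closes it conditionally in the finally. A small sketch of that shape; maybe_open() is a stand-in for the conditional hg.peer() call and is not an API from the patch:

def _maybe_query(condition, maybe_open):
    # maybe_open() is a placeholder for hg.peer(...).
    peer = None
    try:
        if condition:
            peer = maybe_open()
            # ... use the peer ...
    finally:
        # Only close what was actually opened.
        if peer:
            peer.close()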
@@ -4291,12 +4297,15 @@ def incoming(ui, repo, source=b"default"
             ui.expandpath(source), opts.get(b'branch')
         )
         other = hg.peer(repo, opts, source)
-        if b'bookmarks' not in other.listkeys(b'namespaces'):
-            ui.warn(_(b"remote doesn't support bookmarks\n"))
-            return 0
-        ui.pager(b'incoming')
-        ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
-        return bookmarks.incoming(ui, repo, other)
+        try:
+            if b'bookmarks' not in other.listkeys(b'namespaces'):
+                ui.warn(_(b"remote doesn't support bookmarks\n"))
+                return 0
+            ui.pager(b'incoming')
+            ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
+            return bookmarks.incoming(ui, repo, other)
+        finally:
+            other.close()

     repo._subtoppath = ui.expandpath(source)
     try:
@@ -4327,7 +4336,8 @@ def init(ui, dest=b".", **opts):
     Returns 0 on success.
     """
     opts = pycompat.byteskwargs(opts)
-    hg.peer(ui, opts, ui.expandpath(dest), create=True)
+    peer = hg.peer(ui, opts, ui.expandpath(dest), create=True)
+    peer.close()


 @command(
@@ -4963,12 +4973,15 @@ def outgoing(ui, repo, dest=None, **opts
     if opts.get(b'bookmarks'):
         dest = path.pushloc or path.loc
         other = hg.peer(repo, opts, dest)
-        if b'bookmarks' not in other.listkeys(b'namespaces'):
-            ui.warn(_(b"remote doesn't support bookmarks\n"))
-            return 0
-        ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
-        ui.pager(b'outgoing')
-        return bookmarks.outgoing(ui, repo, other)
+        try:
+            if b'bookmarks' not in other.listkeys(b'namespaces'):
+                ui.warn(_(b"remote doesn't support bookmarks\n"))
+                return 0
+            ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
+            ui.pager(b'outgoing')
+            return bookmarks.outgoing(ui, repo, other)
+        finally:
+            other.close()

     repo._subtoppath = path.pushloc or path.loc
     try:
@@ -5679,63 +5692,67 @@ def push(ui, repo, dest=None, **opts):
     revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
     other = hg.peer(repo, opts, dest)

-    if revs:
-        revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]
-        if not revs:
-            raise error.InputError(
-                _(b"specified revisions evaluate to an empty set"),
-                hint=_(b"use different revision arguments"),
-            )
-    elif path.pushrev:
-        # It doesn't make any sense to specify ancestor revisions. So limit
-        # to DAG heads to make discovery simpler.
-        expr = revsetlang.formatspec(b'heads(%r)', path.pushrev)
-        revs = scmutil.revrange(repo, [expr])
-        revs = [repo[rev].node() for rev in revs]
-        if not revs:
-            raise error.InputError(
-                _(b'default push revset for path evaluates to an empty set')
-            )
-    elif ui.configbool(b'commands', b'push.require-revs'):
-        raise error.InputError(
-            _(b'no revisions specified to push'),
-            hint=_(b'did you mean "hg push -r ."?'),
-        )
-
-    repo._subtoppath = dest
-    try:
-        # push subrepos depth-first for coherent ordering
-        c = repo[b'.']
-        subs = c.substate  # only repos that are committed
-        for s in sorted(subs):
-            result = c.sub(s).push(opts)
-            if result == 0:
-                return not result
-    finally:
-        del repo._subtoppath
-
-    opargs = dict(opts.get(b'opargs', {}))  # copy opargs since we may mutate it
-    opargs.setdefault(b'pushvars', []).extend(opts.get(b'pushvars', []))
-
-    pushop = exchange.push(
-        repo,
-        other,
-        opts.get(b'force'),
-        revs=revs,
-        newbranch=opts.get(b'new_branch'),
-        bookmarks=opts.get(b'bookmark', ()),
-        publish=opts.get(b'publish'),
-        opargs=opargs,
-    )
-
-    result = not pushop.cgresult
-
-    if pushop.bkresult is not None:
-        if pushop.bkresult == 2:
-            result = 2
-        elif not result and pushop.bkresult:
-            result = 2
-
-    return result
+    try:
+        if revs:
+            revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]
+            if not revs:
+                raise error.InputError(
+                    _(b"specified revisions evaluate to an empty set"),
+                    hint=_(b"use different revision arguments"),
+                )
+        elif path.pushrev:
+            # It doesn't make any sense to specify ancestor revisions. So limit
+            # to DAG heads to make discovery simpler.
+            expr = revsetlang.formatspec(b'heads(%r)', path.pushrev)
+            revs = scmutil.revrange(repo, [expr])
+            revs = [repo[rev].node() for rev in revs]
+            if not revs:
+                raise error.InputError(
+                    _(b'default push revset for path evaluates to an empty set')
+                )
+        elif ui.configbool(b'commands', b'push.require-revs'):
+            raise error.InputError(
+                _(b'no revisions specified to push'),
+                hint=_(b'did you mean "hg push -r ."?'),
+            )
+
+        repo._subtoppath = dest
+        try:
+            # push subrepos depth-first for coherent ordering
+            c = repo[b'.']
+            subs = c.substate  # only repos that are committed
+            for s in sorted(subs):
+                result = c.sub(s).push(opts)
+                if result == 0:
+                    return not result
+        finally:
+            del repo._subtoppath
+
+        opargs = dict(
+            opts.get(b'opargs', {})
+        )  # copy opargs since we may mutate it
+        opargs.setdefault(b'pushvars', []).extend(opts.get(b'pushvars', []))
+
+        pushop = exchange.push(
+            repo,
+            other,
+            opts.get(b'force'),
+            revs=revs,
+            newbranch=opts.get(b'new_branch'),
+            bookmarks=opts.get(b'bookmark', ()),
+            publish=opts.get(b'publish'),
+            opargs=opargs,
+        )
+
+        result = not pushop.cgresult
+
+        if pushop.bkresult is not None:
+            if pushop.bkresult == 2:
+                result = 2
+            elif not result and pushop.bkresult:
+                result = 2
+
+        return result
+    finally:
+        other.close()

@@ -471,17 +471,20 @@ def debugcapabilities(ui, path, **opts):
     """lists the capabilities of a remote peer"""
     opts = pycompat.byteskwargs(opts)
     peer = hg.peer(ui, opts, path)
-    caps = peer.capabilities()
-    ui.writenoi18n(b'Main capabilities:\n')
-    for c in sorted(caps):
-        ui.write(b' %s\n' % c)
-    b2caps = bundle2.bundle2caps(peer)
-    if b2caps:
-        ui.writenoi18n(b'Bundle2 capabilities:\n')
-        for key, values in sorted(pycompat.iteritems(b2caps)):
-            ui.write(b' %s\n' % key)
-            for v in values:
-                ui.write(b' %s\n' % v)
+    try:
+        caps = peer.capabilities()
+        ui.writenoi18n(b'Main capabilities:\n')
+        for c in sorted(caps):
+            ui.write(b' %s\n' % c)
+        b2caps = bundle2.bundle2caps(peer)
+        if b2caps:
+            ui.writenoi18n(b'Bundle2 capabilities:\n')
+            for key, values in sorted(pycompat.iteritems(b2caps)):
+                ui.write(b' %s\n' % key)
+                for v in values:
+                    ui.write(b' %s\n' % v)
+    finally:
+        peer.close()


 @command(
@@ -2615,12 +2618,17 @@ def debugpeer(ui, path):
     with ui.configoverride(overrides):
         peer = hg.peer(ui, {}, path)

-        local = peer.local() is not None
-        canpush = peer.canpush()
-
-        ui.write(_(b'url: %s\n') % peer.url())
-        ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
-        ui.write(_(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no')))
+        try:
+            local = peer.local() is not None
+            canpush = peer.canpush()
+
+            ui.write(_(b'url: %s\n') % peer.url())
+            ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
+            ui.write(
+                _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no'))
+            )
+        finally:
+            peer.close()


 @command(
@@ -2723,26 +2731,30 @@ def debugpushkey(ui, repopath, namespace
     """

     target = hg.peer(ui, {}, repopath)
-    if keyinfo:
-        key, old, new = keyinfo
-        with target.commandexecutor() as e:
-            r = e.callcommand(
-                b'pushkey',
-                {
-                    b'namespace': namespace,
-                    b'key': key,
-                    b'old': old,
-                    b'new': new,
-                },
-            ).result()
-
-        ui.status(pycompat.bytestr(r) + b'\n')
-        return not r
-    else:
-        for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
-            ui.write(
-                b"%s\t%s\n" % (stringutil.escapestr(k), stringutil.escapestr(v))
-            )
+    try:
+        if keyinfo:
+            key, old, new = keyinfo
+            with target.commandexecutor() as e:
+                r = e.callcommand(
+                    b'pushkey',
+                    {
+                        b'namespace': namespace,
+                        b'key': key,
+                        b'old': old,
+                        b'new': new,
+                    },
+                ).result()
+
+            ui.status(pycompat.bytestr(r) + b'\n')
+            return not r
+        else:
+            for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
+                ui.write(
+                    b"%s\t%s\n"
+                    % (stringutil.escapestr(k), stringutil.escapestr(v))
+                )
+    finally:
+        target.close()


 @command(b'debugpvec', [], _(b'A B'))
@@ -4095,19 +4107,22 @@ def debugwhyunstable(ui, repo, rev):
 def debugwireargs(ui, repopath, *vals, **opts):
     opts = pycompat.byteskwargs(opts)
     repo = hg.peer(ui, opts, repopath)
-    for opt in cmdutil.remoteopts:
-        del opts[opt[1]]
-    args = {}
-    for k, v in pycompat.iteritems(opts):
-        if v:
-            args[k] = v
-    args = pycompat.strkwargs(args)
-    # run twice to check that we don't mess up the stream for the next command
-    res1 = repo.debugwireargs(*vals, **args)
-    res2 = repo.debugwireargs(*vals, **args)
-    ui.write(b"%s\n" % res1)
-    if res1 != res2:
-        ui.warn(b"%s\n" % res2)
+    try:
+        for opt in cmdutil.remoteopts:
+            del opts[opt[1]]
+        args = {}
+        for k, v in pycompat.iteritems(opts):
+            if v:
+                args[k] = v
+        args = pycompat.strkwargs(args)
+        # run twice to check that we don't mess up the stream for the next command
+        res1 = repo.debugwireargs(*vals, **args)
+        res2 = repo.debugwireargs(*vals, **args)
+        ui.write(b"%s\n" % res1)
+        if res1 != res2:
+            ui.warn(b"%s\n" % res2)
+    finally:
+        repo.close()


 def _parsewirelangblocks(fh):
@@ -678,140 +678,148 @@ def clone(
     srcpeer = source.peer()  # in case we were called with a localrepo
     branches = (None, branch or [])
     origsource = source = srcpeer.url()
-    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
-
-    if dest is None:
-        dest = defaultdest(source)
-        if dest:
-            ui.status(_(b"destination directory: %s\n") % dest)
-    else:
-        dest = ui.expandpath(dest)
-
-    dest = util.urllocalpath(dest)
-    source = util.urllocalpath(source)
-
-    if not dest:
-        raise error.InputError(_(b"empty destination path is not valid"))
-
-    destvfs = vfsmod.vfs(dest, expandpath=True)
-    if destvfs.lexists():
-        if not destvfs.isdir():
-            raise error.InputError(_(b"destination '%s' already exists") % dest)
-        elif destvfs.listdir():
-            raise error.InputError(_(b"destination '%s' is not empty") % dest)
-
-    createopts = {}
-    narrow = False
-
-    if storeincludepats is not None:
-        narrowspec.validatepatterns(storeincludepats)
-        narrow = True
-
-    if storeexcludepats is not None:
-        narrowspec.validatepatterns(storeexcludepats)
-        narrow = True
-
-    if narrow:
-        # Include everything by default if only exclusion patterns defined.
-        if storeexcludepats and not storeincludepats:
-            storeincludepats = {b'path:.'}
-
-        createopts[b'narrowfiles'] = True
-
-    if depth:
-        createopts[b'shallowfilestore'] = True
-
-    if srcpeer.capable(b'lfs-serve'):
-        # Repository creation honors the config if it disabled the extension, so
-        # we can't just announce that lfs will be enabled. This check avoids
-        # saying that lfs will be enabled, and then saying it's an unknown
-        # feature. The lfs creation option is set in either case so that a
-        # requirement is added. If the extension is explicitly disabled but the
-        # requirement is set, the clone aborts early, before transferring any
-        # data.
-        createopts[b'lfs'] = True
-
-        if extensions.disabled_help(b'lfs'):
-            ui.status(
-                _(
-                    b'(remote is using large file support (lfs), but it is '
-                    b'explicitly disabled in the local configuration)\n'
-                )
-            )
-        else:
-            ui.status(
-                _(
-                    b'(remote is using large file support (lfs); lfs will '
-                    b'be enabled for this repository)\n'
-                )
-            )
-
-    shareopts = shareopts or {}
-    sharepool = shareopts.get(b'pool')
-    sharenamemode = shareopts.get(b'mode')
-    if sharepool and islocal(dest):
-        sharepath = None
-        if sharenamemode == b'identity':
-            # Resolve the name from the initial changeset in the remote
-            # repository. This returns nullid when the remote is empty. It
-            # raises RepoLookupError if revision 0 is filtered or otherwise
-            # not available. If we fail to resolve, sharing is not enabled.
-            try:
-                with srcpeer.commandexecutor() as e:
-                    rootnode = e.callcommand(
-                        b'lookup',
-                        {
-                            b'key': b'0',
-                        },
-                    ).result()
-
-                if rootnode != nullid:
-                    sharepath = os.path.join(sharepool, hex(rootnode))
-                else:
-                    ui.status(
-                        _(
-                            b'(not using pooled storage: '
-                            b'remote appears to be empty)\n'
-                        )
-                    )
-            except error.RepoLookupError:
-                ui.status(
-                    _(
-                        b'(not using pooled storage: '
-                        b'unable to resolve identity of remote)\n'
-                    )
-                )
-        elif sharenamemode == b'remote':
-            sharepath = os.path.join(
-                sharepool, hex(hashutil.sha1(source).digest())
-            )
-        else:
-            raise error.Abort(
-                _(b'unknown share naming mode: %s') % sharenamemode
-            )
-
-        # TODO this is a somewhat arbitrary restriction.
-        if narrow:
-            ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
-            sharepath = None
-
-        if sharepath:
-            return clonewithshare(
-                ui,
-                peeropts,
-                sharepath,
-                source,
-                srcpeer,
-                dest,
-                pull=pull,
-                rev=revs,
-                update=update,
-                stream=stream,
-            )
-
-    srclock = destlock = cleandir = None
-    srcrepo = srcpeer.local()
-    try:
-        abspath = origsource
-        if islocal(origsource):
-            abspath = os.path.abspath(util.urllocalpath(origsource))
+    srclock = destlock = cleandir = None
+    destpeer = None
+    try:
+        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
+
+        if dest is None:
+            dest = defaultdest(source)
+            if dest:
+                ui.status(_(b"destination directory: %s\n") % dest)
+        else:
+            dest = ui.expandpath(dest)
+
+        dest = util.urllocalpath(dest)
+        source = util.urllocalpath(source)
+
+        if not dest:
+            raise error.InputError(_(b"empty destination path is not valid"))
+
+        destvfs = vfsmod.vfs(dest, expandpath=True)
+        if destvfs.lexists():
+            if not destvfs.isdir():
+                raise error.InputError(
+                    _(b"destination '%s' already exists") % dest
+                )
+            elif destvfs.listdir():
+                raise error.InputError(
+                    _(b"destination '%s' is not empty") % dest
+                )
+
+        createopts = {}
+        narrow = False
+
+        if storeincludepats is not None:
+            narrowspec.validatepatterns(storeincludepats)
+            narrow = True
+
+        if storeexcludepats is not None:
+            narrowspec.validatepatterns(storeexcludepats)
+            narrow = True
+
+        if narrow:
+            # Include everything by default if only exclusion patterns defined.
+            if storeexcludepats and not storeincludepats:
+                storeincludepats = {b'path:.'}
+
+            createopts[b'narrowfiles'] = True
+
+        if depth:
+            createopts[b'shallowfilestore'] = True
+
+        if srcpeer.capable(b'lfs-serve'):
+            # Repository creation honors the config if it disabled the extension, so
+            # we can't just announce that lfs will be enabled. This check avoids
+            # saying that lfs will be enabled, and then saying it's an unknown
+            # feature. The lfs creation option is set in either case so that a
+            # requirement is added. If the extension is explicitly disabled but the
+            # requirement is set, the clone aborts early, before transferring any
+            # data.
+            createopts[b'lfs'] = True
+
+            if extensions.disabled_help(b'lfs'):
+                ui.status(
+                    _(
+                        b'(remote is using large file support (lfs), but it is '
+                        b'explicitly disabled in the local configuration)\n'
+                    )
+                )
+            else:
+                ui.status(
+                    _(
+                        b'(remote is using large file support (lfs); lfs will '
+                        b'be enabled for this repository)\n'
+                    )
+                )
+
+        shareopts = shareopts or {}
+        sharepool = shareopts.get(b'pool')
+        sharenamemode = shareopts.get(b'mode')
+        if sharepool and islocal(dest):
+            sharepath = None
+            if sharenamemode == b'identity':
+                # Resolve the name from the initial changeset in the remote
+                # repository. This returns nullid when the remote is empty. It
+                # raises RepoLookupError if revision 0 is filtered or otherwise
+                # not available. If we fail to resolve, sharing is not enabled.
+                try:
+                    with srcpeer.commandexecutor() as e:
+                        rootnode = e.callcommand(
+                            b'lookup',
+                            {
+                                b'key': b'0',
+                            },
+                        ).result()
+
+                    if rootnode != nullid:
+                        sharepath = os.path.join(sharepool, hex(rootnode))
+                    else:
+                        ui.status(
+                            _(
+                                b'(not using pooled storage: '
+                                b'remote appears to be empty)\n'
+                            )
+                        )
+                except error.RepoLookupError:
+                    ui.status(
+                        _(
+                            b'(not using pooled storage: '
+                            b'unable to resolve identity of remote)\n'
+                        )
+                    )
+            elif sharenamemode == b'remote':
+                sharepath = os.path.join(
+                    sharepool, hex(hashutil.sha1(source).digest())
+                )
+            else:
+                raise error.Abort(
+                    _(b'unknown share naming mode: %s') % sharenamemode
+                )
+
+            # TODO this is a somewhat arbitrary restriction.
+            if narrow:
+                ui.status(
+                    _(b'(pooled storage not supported for narrow clones)\n')
+                )
+                sharepath = None
+
+            if sharepath:
+                return clonewithshare(
+                    ui,
+                    peeropts,
+                    sharepath,
+                    source,
+                    srcpeer,
+                    dest,
+                    pull=pull,
+                    rev=revs,
+                    update=update,
+                    stream=stream,
+                )
+
+        srcrepo = srcpeer.local()
+
+        abspath = origsource
+        if islocal(origsource):
+            abspath = os.path.abspath(util.urllocalpath(origsource))
@@ -1052,6 +1060,8 @@ def clone(
             shutil.rmtree(cleandir, True)
         if srcpeer is not None:
             srcpeer.close()
+        if destpeer and destpeer.local() is None:
+            destpeer.close()
     return srcpeer, destpeer


@@ -1253,15 +1263,17 @@ def _incoming(
     """
     source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
     other = peer(repo, opts, source)
-    ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
-    revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
+    cleanupfn = other.close
+    try:
+        ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
+        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

-    if revs:
-        revs = [other.lookup(rev) for rev in revs]
-    other, chlist, cleanupfn = bundlerepo.getremotechanges(
-        ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
-    )
-    try:
-        if not chlist:
-            ui.status(_(b"no changes found\n"))
-            return subreporecurse()
+        if revs:
+            revs = [other.lookup(rev) for rev in revs]
+        other, chlist, cleanupfn = bundlerepo.getremotechanges(
+            ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
+        )
+
+        if not chlist:
+            ui.status(_(b"no changes found\n"))
+            return subreporecurse()
@@ -1320,13 +1332,17 @@ def _outgoing(ui, repo, dest, opts):
         revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

     other = peer(repo, opts, dest)
-    outgoing = discovery.findcommonoutgoing(
-        repo, other, revs, force=opts.get(b'force')
-    )
-    o = outgoing.missing
-    if not o:
-        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
-    return o, other
+    try:
+        outgoing = discovery.findcommonoutgoing(
+            repo, other, revs, force=opts.get(b'force')
+        )
+        o = outgoing.missing
+        if not o:
+            scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
+        return o, other
+    except:  # re-raises
+        other.close()
+        raise


 def outgoing(ui, repo, dest, opts):
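In _outgoing() above the peer is handed back to the caller on success, so a plain finally would close it too early; the patch instead closes it only when an exception escapes and lets the caller (outgoing(), in the next hunk) do the close. A sketch of that shape; _open_and_prepare() and make_peer() are illustrative names, not identifiers from the patch:

def _open_and_prepare(make_peer):
    # make_peer() is a placeholder for peer(repo, opts, dest).
    other = make_peer()
    try:
        # ... discovery work that may raise ...
        return other  # on success, ownership moves to the caller
    except:  # re-raises, mirroring the bare except used in the patch
        other.close()
        raise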
@@ -1341,27 +1357,30 @@ def outgoing(ui, repo, dest, opts):

     limit = logcmdutil.getlimit(opts)
     o, other = _outgoing(ui, repo, dest, opts)
-    if not o:
-        cmdutil.outgoinghooks(ui, repo, other, opts, o)
-        return recurse()
+    try:
+        if not o:
+            cmdutil.outgoinghooks(ui, repo, other, opts, o)
+            return recurse()

-    if opts.get(b'newest_first'):
-        o.reverse()
-    ui.pager(b'outgoing')
-    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
-    count = 0
-    for n in o:
-        if limit is not None and count >= limit:
-            break
-        parents = [p for p in repo.changelog.parents(n) if p != nullid]
-        if opts.get(b'no_merges') and len(parents) == 2:
-            continue
-        count += 1
-        displayer.show(repo[n])
-    displayer.close()
-    cmdutil.outgoinghooks(ui, repo, other, opts, o)
-    recurse()
-    return 0  # exit code is zero since we found outgoing changes
+        if opts.get(b'newest_first'):
+            o.reverse()
+        ui.pager(b'outgoing')
+        displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
+        count = 0
+        for n in o:
+            if limit is not None and count >= limit:
+                break
+            parents = [p for p in repo.changelog.parents(n) if p != nullid]
+            if opts.get(b'no_merges') and len(parents) == 2:
+                continue
+            count += 1
+            displayer.show(repo[n])
+        displayer.close()
+        cmdutil.outgoinghooks(ui, repo, other, opts, o)
+        recurse()
+        return 0  # exit code is zero since we found outgoing changes
+    finally:
+        other.close()


 def verify(repo, level=None):
@@ -1841,9 +1841,12 @@ def outgoing(repo, subset, x):
     if revs:
         revs = [repo.lookup(rev) for rev in revs]
     other = hg.peer(repo, {}, dest)
-    repo.ui.pushbuffer()
-    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
-    repo.ui.popbuffer()
+    try:
+        repo.ui.pushbuffer()
+        outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
+        repo.ui.popbuffer()
+    finally:
+        other.close()
     cl = repo.changelog
     o = {cl.rev(r) for r in outgoing.missing}
     return subset & o
@@ -175,10 +175,7 @@ def _cleanuppipes(ui, pipei, pipeo, pipe
         # to deadlocks due to a peer get gc'ed in a fork
         # We add our own stack trace, because the stacktrace when called
         # from __del__ is useless.
-        if False:  # enabled in next commit
-            ui.develwarn(
-                b'missing close on SSH connection created at:\n%s' % warn
-            )
+        ui.develwarn(b'missing close on SSH connection created at:\n%s' % warn)


 def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
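The warning enabled above is only useful because the creation stack is captured up front; as the comment notes, a traceback taken from __del__ points at the garbage collector rather than at the code that leaked the peer. A rough, self-contained illustration of that idea (not sshpeer's actual implementation):

import traceback


class TrackedConnection(object):
    """Toy stand-in for an SSH peer that remembers where it was created."""

    def __init__(self):
        self._closed = False
        # Record the creation stack now; it is the only useful context
        # available later if close() is never called.
        self._created_at = ''.join(traceback.format_stack())

    def close(self):
        self._closed = True

    def __del__(self):
        if not self._closed:
            print(
                'missing close on connection created at:\n%s' % self._created_at
            )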
@@ -716,13 +716,17 @@ class hgsubrepo(abstractsubrepo):
                     _(b'sharing subrepo %s from %s\n')
                     % (subrelpath(self), srcurl)
                 )
-                shared = hg.share(
-                    self._repo._subparent.baseui,
-                    getpeer(),
-                    self._repo.root,
-                    update=False,
-                    bookmarks=False,
-                )
+                peer = getpeer()
+                try:
+                    shared = hg.share(
+                        self._repo._subparent.baseui,
+                        peer,
+                        self._repo.root,
+                        update=False,
+                        bookmarks=False,
+                    )
+                finally:
+                    peer.close()
                 self._repo = shared.local()
             else:
                 # TODO: find a common place for this and this code in the
@@ -743,14 +747,18 @@ class hgsubrepo(abstractsubrepo):
                     _(b'cloning subrepo %s from %s\n')
                     % (subrelpath(self), util.hidepassword(srcurl))
                 )
-                other, cloned = hg.clone(
-                    self._repo._subparent.baseui,
-                    {},
-                    getpeer(),
-                    self._repo.root,
-                    update=False,
-                    shareopts=shareopts,
-                )
+                peer = getpeer()
+                try:
+                    other, cloned = hg.clone(
+                        self._repo._subparent.baseui,
+                        {},
+                        peer,
+                        self._repo.root,
+                        update=False,
+                        shareopts=shareopts,
+                    )
+                finally:
+                    peer.close()
                 self._repo = cloned.local()
                 self._initrepo(parentrepo, source, create=True)
             self._cachestorehash(srcurl)
@@ -760,7 +768,11 @@ class hgsubrepo(abstractsubrepo):
                 % (subrelpath(self), util.hidepassword(srcurl))
             )
             cleansub = self.storeclean(srcurl)
-            exchange.pull(self._repo, getpeer())
+            peer = getpeer()
+            try:
+                exchange.pull(self._repo, peer)
+            finally:
+                peer.close()
             if cleansub:
                 # keep the repo clean after pull
                 self._cachestorehash(srcurl)
@@ -845,7 +857,10 @@ class hgsubrepo(abstractsubrepo):
             % (subrelpath(self), util.hidepassword(dsturl))
         )
         other = hg.peer(self._repo, {b'ssh': ssh}, dsturl)
-        res = exchange.push(self._repo, other, force, newbranch=newbranch)
+        try:
+            res = exchange.push(self._repo, other, force, newbranch=newbranch)
+        finally:
+            other.close()

         # the repo is now clean
         self._cachestorehash(dsturl)
@@ -21,7 +21,10 @@ def getflogheads(ui, repo, path):
     dest = repo.ui.expandpath(b'default')
     peer = hg.peer(repo, {}, dest)

-    flogheads = peer.x_rfl_getflogheads(path)
+    try:
+        flogheads = peer.x_rfl_getflogheads(path)
+    finally:
+        peer.close()

     if flogheads:
         for head in flogheads:
@@ -361,6 +361,7 @@ Empty [acl.allow]
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
+  truncating cache/rbc-revs-v1 to 8
   abort: acl: user "fred" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
   no rollback information available
   0:6675d58eff77
@@ -808,7 +809,6 @@ fred is not blocked from moving bookmark
   acl: acl.deny.bookmarks not enabled
   acl: bookmark access granted: "ef1ea85a6374b77d6da9dcda9541f498f2d17df7" on bookmark "moving-bookmark"
   bundle2-input-bundle: 7 parts total
-  truncating cache/rbc-revs-v1 to 8
   updating the branch cache
   invalid branch cache (served.hidden): tip differs
   added 1 changesets with 1 changes to 1 files
@@ -900,6 +900,7 @@ fred is not allowed to move bookmarks
   bundle2-input-bundle: 7 parts total
   transaction abort!
   rollback completed
+  truncating cache/rbc-revs-v1 to 8
   abort: acl: user "fred" denied on bookmark "moving-bookmark" (changeset "ef1ea85a6374b77d6da9dcda9541f498f2d17df7")
   no rollback information available
   0:6675d58eff77
@@ -985,7 +986,6 @@ barney is allowed everywhere
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
   bundle2-input-bundle: 5 parts total
-  truncating cache/rbc-revs-v1 to 8
   updating the branch cache
   added 3 changesets with 3 changes to 3 files
   bundle2-output-bundle: "HG20", 1 parts total
@@ -1073,6 +1073,7 @@ wilma can change files with a .txt exten
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
+  truncating cache/rbc-revs-v1 to 8
  abort: acl: user "wilma" not allowed on "quux/file.py" (changeset "911600dab2ae")
   no rollback information available
   0:6675d58eff77
@@ -1322,7 +1323,6 @@ acl.config can set only [acl.allow]/[acl
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
   bundle2-input-bundle: 5 parts total
-  truncating cache/rbc-revs-v1 to 8
   updating the branch cache
   added 3 changesets with 3 changes to 3 files
   bundle2-output-bundle: "HG20", 1 parts total
@@ -1499,6 +1499,7 @@ no one is allowed inside foo/Bar/
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
+  truncating cache/rbc-revs-v1 to 8
   abort: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
   no rollback information available
   0:6675d58eff77
@@ -1583,7 +1584,6 @@ OS-level groups
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
   bundle2-input-bundle: 5 parts total
-  truncating cache/rbc-revs-v1 to 8
   updating the branch cache
   added 3 changesets with 3 changes to 3 files
   bundle2-output-bundle: "HG20", 1 parts total
@@ -1671,6 +1671,7 @@ OS-level groups
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
+  truncating cache/rbc-revs-v1 to 8
   abort: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
   no rollback information available
   0:6675d58eff77
@@ -382,6 +382,7 @@ test http authentication
   devel-peer-request: 16 bytes of commands arguments in headers
   devel-peer-request: finished in *.???? seconds (200) (glob)
   received listkey for "phases": 15 bytes
+  (sent 9 HTTP requests and 3898 bytes; received 920 bytes in responses)
   $ hg rollback -q

   $ sed 's/.*] "/"/' < ../access.log
@@ -462,6 +462,7 @@ lfs content, and the extension enabled.
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
+  (sent 8 HTTP requests and 3526 bytes; received 961 bytes in responses) (?)
   $ grep 'lfs' .hg/requires $SERVER_REQUIRES
   .hg/requires:lfs
   $TESTTMP/server/.hg/requires:lfs