##// END OF EJS Templates
peer: pass the `path` object to `make_peer`

Changeset r50652:5f71fff8 on branch `default`, by marmoute.
Parent/child navigation and "Browse files" / "Show More" links omitted.
@@ -1,176 +1,175 b''
1 # Copyright 2009, Alexander Solovyov <piranha@piranha.org.ua>
1 # Copyright 2009, Alexander Solovyov <piranha@piranha.org.ua>
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 """extend schemes with shortcuts to repository swarms
6 """extend schemes with shortcuts to repository swarms
7
7
8 This extension allows you to specify shortcuts for parent URLs with a
8 This extension allows you to specify shortcuts for parent URLs with a
9 lot of repositories to act like a scheme, for example::
9 lot of repositories to act like a scheme, for example::
10
10
11 [schemes]
11 [schemes]
12 py = http://code.python.org/hg/
12 py = http://code.python.org/hg/
13
13
14 After that you can use it like::
14 After that you can use it like::
15
15
16 hg clone py://trunk/
16 hg clone py://trunk/
17
17
18 Additionally there is support for some more complex schemas, for
18 Additionally there is support for some more complex schemas, for
19 example used by Google Code::
19 example used by Google Code::
20
20
21 [schemes]
21 [schemes]
22 gcode = http://{1}.googlecode.com/hg/
22 gcode = http://{1}.googlecode.com/hg/
23
23
24 The syntax is taken from Mercurial templates, and you have unlimited
24 The syntax is taken from Mercurial templates, and you have unlimited
25 number of variables, starting with ``{1}`` and continuing with
25 number of variables, starting with ``{1}`` and continuing with
26 ``{2}``, ``{3}`` and so on. This variables will receive parts of URL
26 ``{2}``, ``{3}`` and so on. This variables will receive parts of URL
27 supplied, split by ``/``. Anything not specified as ``{part}`` will be
27 supplied, split by ``/``. Anything not specified as ``{part}`` will be
28 just appended to an URL.
28 just appended to an URL.
29
29
30 For convenience, the extension adds these schemes by default::
30 For convenience, the extension adds these schemes by default::
31
31
32 [schemes]
32 [schemes]
33 py = http://hg.python.org/
33 py = http://hg.python.org/
34 bb = https://bitbucket.org/
34 bb = https://bitbucket.org/
35 bb+ssh = ssh://hg@bitbucket.org/
35 bb+ssh = ssh://hg@bitbucket.org/
36 gcode = https://{1}.googlecode.com/hg/
36 gcode = https://{1}.googlecode.com/hg/
37 kiln = https://{1}.kilnhg.com/Repo/
37 kiln = https://{1}.kilnhg.com/Repo/
38
38
39 You can override a predefined scheme by defining a new scheme with the
39 You can override a predefined scheme by defining a new scheme with the
40 same name.
40 same name.
41 """
41 """
42
42
43 import os
43 import os
44 import re
44 import re
45
45
46 from mercurial.i18n import _
46 from mercurial.i18n import _
47 from mercurial import (
47 from mercurial import (
48 error,
48 error,
49 extensions,
49 extensions,
50 hg,
50 hg,
51 pycompat,
51 pycompat,
52 registrar,
52 registrar,
53 templater,
53 templater,
54 )
54 )
55 from mercurial.utils import (
55 from mercurial.utils import (
56 urlutil,
56 urlutil,
57 )
57 )
58
58
59 cmdtable = {}
59 cmdtable = {}
60 command = registrar.command(cmdtable)
60 command = registrar.command(cmdtable)
61 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
61 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
62 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
62 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
63 # be specifying the version(s) of Mercurial they are tested with, or
63 # be specifying the version(s) of Mercurial they are tested with, or
64 # leave the attribute unspecified.
64 # leave the attribute unspecified.
65 testedwith = b'ships-with-hg-core'
65 testedwith = b'ships-with-hg-core'
66
66
67 _partre = re.compile(br'{(\d+)\}')
67 _partre = re.compile(br'{(\d+)\}')
68
68
69
69
class ShortRepository:
    """Pseudo repository/peer class that expands a scheme shortcut.

    An instance is registered per configured scheme (see ``extsetup``)
    in ``hg.peer_schemes`` or ``hg.repo_schemes``; when Mercurial
    dispatches on the URL scheme it lands here, the shortcut is expanded
    by :meth:`resolve`, and the call is re-dispatched to the real
    repository or peer class for the expanded URL.
    """

    def __init__(self, url, scheme, templater):
        # `scheme` is the shortcut name (e.g. b'py'); `url` is the
        # template it expands to (e.g. b'http://{1}.example.com/').
        self.scheme = scheme
        self.templater = templater
        self.url = url
        try:
            # Highest {N} placeholder used in the template; determines
            # how many leading path components resolve() consumes.
            self.parts = max(map(int, _partre.findall(self.url)))
        except ValueError:
            # max() raised on an empty sequence: no {N} placeholders.
            self.parts = 0

    def __repr__(self):
        return b'<ShortRepository: %s>' % self.scheme

    def make_peer(self, ui, path, *args, **kwargs):
        # `path` is a urlutil.path object.  Expand the shortcut, then
        # re-dispatch on the scheme of the expanded URL, handing the
        # underlying class a copy of the path pointing at the new URL.
        new_url = self.resolve(path.rawloc)
        path = path.copy(new_raw_location=new_url)
        cls = hg.peer_schemes.get(path.url.scheme)
        if cls is not None:
            return cls.make_peer(ui, path, *args, **kwargs)
        return None

    def instance(self, ui, url, create, intents=None, createopts=None):
        # Same expansion as make_peer(), but for the repository
        # (bytes-URL) entry point.
        url = self.resolve(url)
        u = urlutil.url(url)
        scheme = u.scheme or b'file'
        if scheme in hg.peer_schemes:
            cls = hg.peer_schemes[scheme]
        elif scheme in hg.repo_schemes:
            cls = hg.repo_schemes[scheme]
        else:
            cls = hg.LocalFactory
        return cls.instance(
            ui, url, create, intents=intents, createopts=createopts
        )

    def resolve(self, url):
        """Expand a ``scheme://...`` shortcut URL into its full form."""
        # Should this use the urlutil.url class, or is manual parsing better?
        try:
            url = url.split(b'://', 1)[1]
        except IndexError:
            raise error.Abort(_(b"no '://' in scheme url '%s'") % url)
        # The first self.parts path components feed the {1}..{N}
        # placeholders; whatever remains is appended verbatim.
        parts = url.split(b'/', self.parts)
        if len(parts) > self.parts:
            tail = parts[-1]
            parts = parts[:-1]
        else:
            tail = b''
        context = {b'%d' % (i + 1): v for i, v in enumerate(parts)}
        return b''.join(self.templater.process(self.url, context)) + tail
120
119
121
120
def hasdriveletter(orig, path):
    """Wrapped urlutil.hasdriveletter().

    A path starting with a configured scheme shortcut is never treated
    as a Windows drive-letter path, even if the shortcut is one letter.
    """
    if path and any(path.startswith(s + b':') for s in schemes):
        return False
    return orig(path)
128
127
129
128
# Default scheme shortcuts.  Entries from the user's [schemes] config
# section are merged in (and may override these) in extsetup().
schemes = {
    b'py': b'http://hg.python.org/',
    b'bb': b'https://bitbucket.org/',
    b'bb+ssh': b'ssh://hg@bitbucket.org/',
    b'gcode': b'https://{1}.googlecode.com/hg/',
    b'kiln': b'https://{1}.kilnhg.com/Repo/',
}
137
136
138
137
def _check_drive_letter(scheme):
    """check if a scheme conflict with a Windows drive letter"""
    # Only single-letter schemes on Windows can shadow a drive letter,
    # and only when such a drive actually exists on this machine.
    if not pycompat.iswindows:
        return
    if len(scheme) != 1 or not scheme.isalpha():
        return
    if not os.path.exists(b'%s:\\' % scheme):
        return
    msg = _(b'custom scheme %s:// conflicts with drive letter %s:\\\n')
    msg %= (scheme, scheme.upper())
    raise error.Abort(msg)
150
149
151
150
def extsetup(ui):
    """Register a ShortRepository handler for every configured scheme.

    Merges user-defined ``[schemes]`` entries over the defaults, then
    registers each shortcut with either ``hg.peer_schemes`` or
    ``hg.repo_schemes`` depending on the scheme of its expansion, and
    finally wraps urlutil.hasdriveletter() so shortcuts are not
    mistaken for Windows drive letters.
    """
    schemes.update(dict(ui.configitems(b'schemes')))
    t = templater.engine(templater.parse)
    for scheme, url in schemes.items():
        # BUGFIX: the original passed the whole `schemes` dict here, so
        # the len()==1 single-letter test inside _check_drive_letter()
        # could never match and the conflict check silently never fired.
        _check_drive_letter(scheme)
        url_scheme = urlutil.url(url).scheme
        if url_scheme in hg.peer_schemes:
            hg.peer_schemes[scheme] = ShortRepository(url, scheme, t)
        else:
            hg.repo_schemes[scheme] = ShortRepository(url, scheme, t)

    extensions.wrapfunction(urlutil, b'hasdriveletter', hasdriveletter)
164
163
165
164
@command(b'debugexpandscheme', norepo=True)
def expandscheme(ui, url, **opts):
    """given a repo path, provide the scheme-expanded path"""
    scheme = urlutil.url(url).scheme
    # Peer registrations take precedence over repo registrations,
    # mirroring the lookup order used when actually opening the URL.
    if scheme in hg.peer_schemes:
        handler = hg.peer_schemes[scheme]
    else:
        handler = hg.repo_schemes.get(scheme)
    # Only our own shortcut entries get expanded; builtin scheme
    # handlers (httppeer, sshpeer, ...) leave the URL untouched.
    if isinstance(handler, ShortRepository):
        url = handler.resolve(url)
    ui.write(url + b'\n')
@@ -1,1668 +1,1668 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
62 release = lock.release
62 release = lock.release
63
63
64 # shared features
64 # shared features
65 sharedbookmarks = b'bookmarks'
65 sharedbookmarks = b'bookmarks'
66
66
67
67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against peer ``other`` into revisions.

    ``branches`` is a ``(hashbranch, branches)`` pair (as produced by
    URL parsing); ``revs`` is the caller-supplied revision list.
    Returns ``(revs, checkout)``: the augmented revision list and the
    revision to check out (or None).
    """
    if util.safehasattr(other, 'peer'):
        # a courtesy to callers using a localrepo for other
        peer = other.peer()
    else:
        peer = other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # Nothing branch-related requested: pass revs straight through.
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        # copy so the extend/append calls below don't mutate the input
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        # Fall back to treating the URL fragment as a plain revision.
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # Append all heads of `branch` (newest first); False if unknown.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # Not a branch name: assume it is a revision identifier.
            revs.append(hashbranch)
    return revs, revs[0]
114
114
115
115
116 def _isfile(path):
116 def _isfile(path):
117 try:
117 try:
118 # we use os.stat() directly here instead of os.path.isfile()
118 # we use os.stat() directly here instead of os.path.isfile()
119 # because the latter started returning `False` on invalid path
119 # because the latter started returning `False` on invalid path
120 # exceptions starting in 3.8 and we care about handling
120 # exceptions starting in 3.8 and we care about handling
121 # invalid paths specially here.
121 # invalid paths specially here.
122 st = os.stat(path)
122 st = os.stat(path)
123 except ValueError as e:
123 except ValueError as e:
124 msg = stringutil.forcebytestr(e)
124 msg = stringutil.forcebytestr(e)
125 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
125 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
126 except OSError:
126 except OSError:
127 return False
127 return False
128 else:
128 else:
129 return stat.S_ISREG(st.st_mode)
129 return stat.S_ISREG(st.st_mode)
130
130
131
131
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # A regular file on disk is assumed to be a bundle, which does
        # not count as a local repository.
        path = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(path)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        # Regular file -> bundle repository; anything else (directory,
        # or a not-yet-existing path for create) -> local repository.
        path = util.expandpath(urlutil.urllocalpath(path))
        if _isfile(path):
            cls = bundlerepo
        else:
            cls = localrepo
        return cls.instance(ui, path, *args, **kwargs)
148
148
149
149
# URL schemes served by instantiating a (local) repository object.
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL schemes served by instantiating a wire-protocol peer object.
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
162
162
163
163
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        u = urlutil.url(repo)
        scheme = u.scheme or b'file'
        if scheme in peer_schemes:
            cls = peer_schemes[scheme]
            cls.make_peer  # make sure we load the module
        elif scheme in repo_schemes:
            cls = repo_schemes[scheme]
            cls.instance  # make sure we load the module
        else:
            cls = LocalFactory
        if util.safehasattr(cls, 'islocal'):
            return cls.islocal(repo)  # pytype: disable=module-attr
        # handler classes without an islocal() are treated as remote
        return False
    # passing a repo object here is deprecated; callers should ask the
    # object itself
    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
    return repo.local()
182
182
183
183
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        # remote: hand off to the url module (http(s), proxies, auth)
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
191
191
192
192
# a list of (ui, repo) functions called for wire peer initialization
# (invoked from _setup_repo_or_peer() for non-local objects)
wirepeersetupfuncs = []
195
195
196
196
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run setup callbacks and extension reposetup hooks on ``obj``.

    ``obj`` is a freshly created repository or peer; ``presetupfuncs``
    are caller-supplied callbacks run before any extension hook.
    """
    # prefer the object's own ui (it may carry repo-level config)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers additionally get the registered peer-setup hooks
        for f in wirepeersetupfuncs:
            f(ui, obj)
215
215
216
216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        # no scheme means a plain filesystem path
        scheme = b'file'
    cls = repo_schemes.get(scheme)
    if cls is None:
        # a peer-only scheme cannot yield a local repository object
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        cls = LocalFactory
    repo = cls.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    # callers get the standard 'visible' filtered view
    return repo.filtered(b'visible')
243
243
244
244
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path

    ``path`` may be either a bytes URL or an existing urlutil.path
    object; it is normalized to a path object before dispatch.
    '''
    ui = getattr(uiorrepo, 'ui', uiorrepo)
    rui = remoteui(uiorrepo, opts)
    if util.safehasattr(path, 'url'):
        # this is already a urlutil.path object
        peer_path = path
    else:
        peer_path = urlutil.path(ui, None, rawloc=path, validate_path=False)
    scheme = peer_path.url.scheme  # pytype: disable=attribute-error
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
        # note: the whole path object is handed down, not just its loc
        peer = cls.make_peer(
            rui,
            peer_path,
            create,
            intents=intents,
            createopts=createopts,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo_path = peer_path.loc  # pytype: disable=attribute-error
        if not repo_path:
            repo_path = peer_path.rawloc  # pytype: disable=attribute-error
        repo = repository(
            rui,
            repo_path,
            create,
            intents=intents,
            createopts=createopts,
        )
        peer = repo.peer(path=peer_path)
    return peer
279
279
280
280
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if not path:
        return b''
    # last meaningful path component (normpath strips trailing slashes)
    return os.path.basename(os.path.normpath(path))
301
301
302
302
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share: the store lives in the repo itself
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        # cached from a previous call
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    # cache the result on the repo object for subsequent calls
    repo.srcrepo = srcrepo
    return srcrepo
320
320
321
321
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` is either a bytes URL/path or a repo-like object with a
    ``local()`` method.  Returns the newly created shared repository.
    '''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        # resolve any branch requested in the source URL fragment
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # NOTE(review): the destination is reopened here, presumably so the
    # share-related state written by postshare() takes effect — confirm
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
374
374
375
375
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was performed using the
    share-safe method, where the config of the source is shared."""
    source_vfs = vfsmod.vfs(repo.sharedpath)
    dest_vfs = vfsmod.vfs(repo.path)

    if not source_vfs.exists(b'hgrc'):
        # nothing to copy over
        return

    existing = dest_vfs.read(b'hgrc') if dest_vfs.exists(b'hgrc') else b''

    with dest_vfs(b'hgrc', b'wb') as fp:
        # source config first, so local (existing) settings still win
        fp.write(b"# Config copied from shared source\n")
        fp.write(source_vfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(existing)
396
396
397
397
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        # copystore() only returns a lock when it had to guard the data
        # copy; keep holding it (if any) while we rewrite the metadata.
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # Keep the old share pointer around as a backup instead of
            # deleting it outright.
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            # Drop the share-related requirements and persist the new set.
            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # Make any further use of the old (now invalid) repo object fail loudly.
    localrepo.poisonrepository(repo)

    return newrepo
444
444
445
445
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    # Record the default path in the new repo's hgrc, preferring an
    # explicitly supplied value over the source repo's configuration.
    path_value = defaultpath
    if not path_value:
        path_value = sourcerepo.ui.config(b'paths', b'default')
    if path_value:
        hgrc_body = b'[paths]\ndefault = %s\n' % path_value
        destrepo.vfs.write(b'hgrc', util.tonativeeol(hgrc_body))

    # Narrow repos also need their narrow spec mirrored into the working copy.
    is_narrow = requirements.NARROW_REQUIREMENT in sourcerepo.requirements
    if is_narrow:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
462
462
463
463
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    # A non-boolean ``update`` value names the revision to check out.
    if update is not True:
        checkout = update
    # Try the requested revision first, then the conventional fallbacks.
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
484
484
485
485
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # ``hardlink`` starts unknown; util.copyfiles reports back whether
        # hardlinking was actually used, and that answer is threaded into
        # subsequent calls. NOTE(review): since hardlink is None at this
        # point, ``topic`` is always b'copying' here.
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # Publishing repos do not need phase data copied over.
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # Release the destination lock on any failure; the bare except is
        # deliberate so even KeyboardInterrupt/SystemExit trigger cleanup
        # before re-raising.
        release(destlock)
        raise
524
524
525
525
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    ``rev`` is an optional list of revision identifiers resolved against the
    remote via its ``lookup`` command. ``pull``/``stream`` are accepted for
    interface symmetry with clone(); the pooled clone below is always
    performed in pull mode. Returns ``(srcpeer, destpeer)``.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    # Share the pooled store into the requested destination.
    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
631
631
632
632
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src_cache = srcrepo.cachevfs.join(fname)
    # Nothing to do when the source cache file is absent.
    if not os.path.exists(src_cache):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src_cache, os.path.join(dstcachedir, fname))
642
642
643
643
644 def clone(
644 def clone(
645 ui,
645 ui,
646 peeropts,
646 peeropts,
647 source,
647 source,
648 dest=None,
648 dest=None,
649 pull=False,
649 pull=False,
650 revs=None,
650 revs=None,
651 update=True,
651 update=True,
652 stream=False,
652 stream=False,
653 branch=None,
653 branch=None,
654 shareopts=None,
654 shareopts=None,
655 storeincludepats=None,
655 storeincludepats=None,
656 storeexcludepats=None,
656 storeexcludepats=None,
657 depth=None,
657 depth=None,
658 ):
658 ):
659 """Make a copy of an existing repository.
659 """Make a copy of an existing repository.
660
660
661 Create a copy of an existing repository in a new directory. The
661 Create a copy of an existing repository in a new directory. The
662 source and destination are URLs, as passed to the repository
662 source and destination are URLs, as passed to the repository
663 function. Returns a pair of repository peers, the source and
663 function. Returns a pair of repository peers, the source and
664 newly created destination.
664 newly created destination.
665
665
666 The location of the source is added to the new repository's
666 The location of the source is added to the new repository's
667 .hg/hgrc file, as the default to be used for future pulls and
667 .hg/hgrc file, as the default to be used for future pulls and
668 pushes.
668 pushes.
669
669
670 If an exception is raised, the partly cloned/updated destination
670 If an exception is raised, the partly cloned/updated destination
671 repository will be deleted.
671 repository will be deleted.
672
672
673 Arguments:
673 Arguments:
674
674
675 source: repository object or URL
675 source: repository object or URL
676
676
677 dest: URL of destination repository to create (defaults to base
677 dest: URL of destination repository to create (defaults to base
678 name of source repository)
678 name of source repository)
679
679
680 pull: always pull from source repository, even in local case or if the
680 pull: always pull from source repository, even in local case or if the
681 server prefers streaming
681 server prefers streaming
682
682
683 stream: stream raw data uncompressed from repository (fast over
683 stream: stream raw data uncompressed from repository (fast over
684 LAN, slow over WAN)
684 LAN, slow over WAN)
685
685
686 revs: revision to clone up to (implies pull=True)
686 revs: revision to clone up to (implies pull=True)
687
687
688 update: update working directory after clone completes, if
688 update: update working directory after clone completes, if
689 destination is local repository (True means update to default rev,
689 destination is local repository (True means update to default rev,
690 anything else is treated as a revision)
690 anything else is treated as a revision)
691
691
692 branch: branches to clone
692 branch: branches to clone
693
693
694 shareopts: dict of options to control auto sharing behavior. The "pool" key
694 shareopts: dict of options to control auto sharing behavior. The "pool" key
695 activates auto sharing mode and defines the directory for stores. The
695 activates auto sharing mode and defines the directory for stores. The
696 "mode" key determines how to construct the directory name of the shared
696 "mode" key determines how to construct the directory name of the shared
697 repository. "identity" means the name is derived from the node of the first
697 repository. "identity" means the name is derived from the node of the first
698 changeset in the repository. "remote" means the name is derived from the
698 changeset in the repository. "remote" means the name is derived from the
699 remote's path/URL. Defaults to "identity."
699 remote's path/URL. Defaults to "identity."
700
700
701 storeincludepats and storeexcludepats: sets of file patterns to include and
701 storeincludepats and storeexcludepats: sets of file patterns to include and
702 exclude in the repository copy, respectively. If not defined, all files
702 exclude in the repository copy, respectively. If not defined, all files
703 will be included (a "full" clone). Otherwise a "narrow" clone containing
703 will be included (a "full" clone). Otherwise a "narrow" clone containing
704 only the requested files will be performed. If ``storeincludepats`` is not
704 only the requested files will be performed. If ``storeincludepats`` is not
705 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
705 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
706 ``path:.``. If both are empty sets, no files will be cloned.
706 ``path:.``. If both are empty sets, no files will be cloned.
707 """
707 """
708
708
709 if isinstance(source, bytes):
709 if isinstance(source, bytes):
710 src_path = urlutil.get_clone_path_obj(ui, source)
710 src_path = urlutil.get_clone_path_obj(ui, source)
711 if src_path is None:
711 if src_path is None:
712 srcpeer = peer(ui, peeropts, b'')
712 srcpeer = peer(ui, peeropts, b'')
713 origsource = source = b''
713 origsource = source = b''
714 branches = (None, branch or [])
714 branches = (None, branch or [])
715 else:
715 else:
716 srcpeer = peer(ui, peeropts, src_path)
716 srcpeer = peer(ui, peeropts, src_path)
717 origsource = src_path.rawloc
717 origsource = src_path.rawloc
718 branches = (src_path.branch, branch or [])
718 branches = (src_path.branch, branch or [])
719 source = src_path.loc
719 source = src_path.loc
720 else:
720 else:
721 if util.safehasattr(source, 'peer'):
721 if util.safehasattr(source, 'peer'):
722 srcpeer = source.peer() # in case we were called with a localrepo
722 srcpeer = source.peer() # in case we were called with a localrepo
723 else:
723 else:
724 srcpeer = source
724 srcpeer = source
725 branches = (None, branch or [])
725 branches = (None, branch or [])
726 # XXX path: simply use the peer `path` object when this become available
726 # XXX path: simply use the peer `path` object when this become available
727 origsource = source = srcpeer.url()
727 origsource = source = srcpeer.url()
728 srclock = destlock = destwlock = cleandir = None
728 srclock = destlock = destwlock = cleandir = None
729 destpeer = None
729 destpeer = None
730 try:
730 try:
731 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
731 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
732
732
733 if dest is None:
733 if dest is None:
734 dest = defaultdest(source)
734 dest = defaultdest(source)
735 if dest:
735 if dest:
736 ui.status(_(b"destination directory: %s\n") % dest)
736 ui.status(_(b"destination directory: %s\n") % dest)
737 else:
737 else:
738 dest_path = urlutil.get_clone_path_obj(ui, dest)
738 dest_path = urlutil.get_clone_path_obj(ui, dest)
739 if dest_path is not None:
739 if dest_path is not None:
740 dest = dest_path.rawloc
740 dest = dest_path.rawloc
741 else:
741 else:
742 dest = b''
742 dest = b''
743
743
744 dest = urlutil.urllocalpath(dest)
744 dest = urlutil.urllocalpath(dest)
745 source = urlutil.urllocalpath(source)
745 source = urlutil.urllocalpath(source)
746
746
747 if not dest:
747 if not dest:
748 raise error.InputError(_(b"empty destination path is not valid"))
748 raise error.InputError(_(b"empty destination path is not valid"))
749
749
750 destvfs = vfsmod.vfs(dest, expandpath=True)
750 destvfs = vfsmod.vfs(dest, expandpath=True)
751 if destvfs.lexists():
751 if destvfs.lexists():
752 if not destvfs.isdir():
752 if not destvfs.isdir():
753 raise error.InputError(
753 raise error.InputError(
754 _(b"destination '%s' already exists") % dest
754 _(b"destination '%s' already exists") % dest
755 )
755 )
756 elif destvfs.listdir():
756 elif destvfs.listdir():
757 raise error.InputError(
757 raise error.InputError(
758 _(b"destination '%s' is not empty") % dest
758 _(b"destination '%s' is not empty") % dest
759 )
759 )
760
760
761 createopts = {}
761 createopts = {}
762 narrow = False
762 narrow = False
763
763
764 if storeincludepats is not None:
764 if storeincludepats is not None:
765 narrowspec.validatepatterns(storeincludepats)
765 narrowspec.validatepatterns(storeincludepats)
766 narrow = True
766 narrow = True
767
767
768 if storeexcludepats is not None:
768 if storeexcludepats is not None:
769 narrowspec.validatepatterns(storeexcludepats)
769 narrowspec.validatepatterns(storeexcludepats)
770 narrow = True
770 narrow = True
771
771
772 if narrow:
772 if narrow:
773 # Include everything by default if only exclusion patterns defined.
773 # Include everything by default if only exclusion patterns defined.
774 if storeexcludepats and not storeincludepats:
774 if storeexcludepats and not storeincludepats:
775 storeincludepats = {b'path:.'}
775 storeincludepats = {b'path:.'}
776
776
777 createopts[b'narrowfiles'] = True
777 createopts[b'narrowfiles'] = True
778
778
779 if depth:
779 if depth:
780 createopts[b'shallowfilestore'] = True
780 createopts[b'shallowfilestore'] = True
781
781
782 if srcpeer.capable(b'lfs-serve'):
782 if srcpeer.capable(b'lfs-serve'):
783 # Repository creation honors the config if it disabled the extension, so
783 # Repository creation honors the config if it disabled the extension, so
784 # we can't just announce that lfs will be enabled. This check avoids
784 # we can't just announce that lfs will be enabled. This check avoids
785 # saying that lfs will be enabled, and then saying it's an unknown
785 # saying that lfs will be enabled, and then saying it's an unknown
786 # feature. The lfs creation option is set in either case so that a
786 # feature. The lfs creation option is set in either case so that a
787 # requirement is added. If the extension is explicitly disabled but the
787 # requirement is added. If the extension is explicitly disabled but the
788 # requirement is set, the clone aborts early, before transferring any
788 # requirement is set, the clone aborts early, before transferring any
789 # data.
789 # data.
790 createopts[b'lfs'] = True
790 createopts[b'lfs'] = True
791
791
792 if extensions.disabled_help(b'lfs'):
792 if extensions.disabled_help(b'lfs'):
793 ui.status(
793 ui.status(
794 _(
794 _(
795 b'(remote is using large file support (lfs), but it is '
795 b'(remote is using large file support (lfs), but it is '
796 b'explicitly disabled in the local configuration)\n'
796 b'explicitly disabled in the local configuration)\n'
797 )
797 )
798 )
798 )
799 else:
799 else:
800 ui.status(
800 ui.status(
801 _(
801 _(
802 b'(remote is using large file support (lfs); lfs will '
802 b'(remote is using large file support (lfs); lfs will '
803 b'be enabled for this repository)\n'
803 b'be enabled for this repository)\n'
804 )
804 )
805 )
805 )
806
806
807 shareopts = shareopts or {}
807 shareopts = shareopts or {}
808 sharepool = shareopts.get(b'pool')
808 sharepool = shareopts.get(b'pool')
809 sharenamemode = shareopts.get(b'mode')
809 sharenamemode = shareopts.get(b'mode')
810 if sharepool and islocal(dest):
810 if sharepool and islocal(dest):
811 sharepath = None
811 sharepath = None
812 if sharenamemode == b'identity':
812 if sharenamemode == b'identity':
813 # Resolve the name from the initial changeset in the remote
813 # Resolve the name from the initial changeset in the remote
814 # repository. This returns nullid when the remote is empty. It
814 # repository. This returns nullid when the remote is empty. It
815 # raises RepoLookupError if revision 0 is filtered or otherwise
815 # raises RepoLookupError if revision 0 is filtered or otherwise
816 # not available. If we fail to resolve, sharing is not enabled.
816 # not available. If we fail to resolve, sharing is not enabled.
817 try:
817 try:
818 with srcpeer.commandexecutor() as e:
818 with srcpeer.commandexecutor() as e:
819 rootnode = e.callcommand(
819 rootnode = e.callcommand(
820 b'lookup',
820 b'lookup',
821 {
821 {
822 b'key': b'0',
822 b'key': b'0',
823 },
823 },
824 ).result()
824 ).result()
825
825
826 if rootnode != sha1nodeconstants.nullid:
826 if rootnode != sha1nodeconstants.nullid:
827 sharepath = os.path.join(sharepool, hex(rootnode))
827 sharepath = os.path.join(sharepool, hex(rootnode))
828 else:
828 else:
829 ui.status(
829 ui.status(
830 _(
830 _(
831 b'(not using pooled storage: '
831 b'(not using pooled storage: '
832 b'remote appears to be empty)\n'
832 b'remote appears to be empty)\n'
833 )
833 )
834 )
834 )
835 except error.RepoLookupError:
835 except error.RepoLookupError:
836 ui.status(
836 ui.status(
837 _(
837 _(
838 b'(not using pooled storage: '
838 b'(not using pooled storage: '
839 b'unable to resolve identity of remote)\n'
839 b'unable to resolve identity of remote)\n'
840 )
840 )
841 )
841 )
842 elif sharenamemode == b'remote':
842 elif sharenamemode == b'remote':
843 sharepath = os.path.join(
843 sharepath = os.path.join(
844 sharepool, hex(hashutil.sha1(source).digest())
844 sharepool, hex(hashutil.sha1(source).digest())
845 )
845 )
846 else:
846 else:
847 raise error.Abort(
847 raise error.Abort(
848 _(b'unknown share naming mode: %s') % sharenamemode
848 _(b'unknown share naming mode: %s') % sharenamemode
849 )
849 )
850
850
851 # TODO this is a somewhat arbitrary restriction.
851 # TODO this is a somewhat arbitrary restriction.
852 if narrow:
852 if narrow:
853 ui.status(
853 ui.status(
854 _(b'(pooled storage not supported for narrow clones)\n')
854 _(b'(pooled storage not supported for narrow clones)\n')
855 )
855 )
856 sharepath = None
856 sharepath = None
857
857
858 if sharepath:
858 if sharepath:
859 return clonewithshare(
859 return clonewithshare(
860 ui,
860 ui,
861 peeropts,
861 peeropts,
862 sharepath,
862 sharepath,
863 source,
863 source,
864 srcpeer,
864 srcpeer,
865 dest,
865 dest,
866 pull=pull,
866 pull=pull,
867 rev=revs,
867 rev=revs,
868 update=update,
868 update=update,
869 stream=stream,
869 stream=stream,
870 )
870 )
871
871
872 srcrepo = srcpeer.local()
872 srcrepo = srcpeer.local()
873
873
874 abspath = origsource
874 abspath = origsource
875 if islocal(origsource):
875 if islocal(origsource):
876 abspath = util.abspath(urlutil.urllocalpath(origsource))
876 abspath = util.abspath(urlutil.urllocalpath(origsource))
877
877
878 if islocal(dest):
878 if islocal(dest):
879 if os.path.exists(dest):
879 if os.path.exists(dest):
880 # only clean up directories we create ourselves
880 # only clean up directories we create ourselves
881 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
881 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
882 cleandir = hgdir
882 cleandir = hgdir
883 else:
883 else:
884 cleandir = dest
884 cleandir = dest
885
885
886 copy = False
886 copy = False
887 if (
887 if (
888 srcrepo
888 srcrepo
889 and srcrepo.cancopy()
889 and srcrepo.cancopy()
890 and islocal(dest)
890 and islocal(dest)
891 and not phases.hassecret(srcrepo)
891 and not phases.hassecret(srcrepo)
892 ):
892 ):
893 copy = not pull and not revs
893 copy = not pull and not revs
894
894
895 # TODO this is a somewhat arbitrary restriction.
895 # TODO this is a somewhat arbitrary restriction.
896 if narrow:
896 if narrow:
897 copy = False
897 copy = False
898
898
899 if copy:
899 if copy:
900 try:
900 try:
901 # we use a lock here because if we race with commit, we
901 # we use a lock here because if we race with commit, we
902 # can end up with extra data in the cloned revlogs that's
902 # can end up with extra data in the cloned revlogs that's
903 # not pointed to by changesets, thus causing verify to
903 # not pointed to by changesets, thus causing verify to
904 # fail
904 # fail
905 srclock = srcrepo.lock(wait=False)
905 srclock = srcrepo.lock(wait=False)
906 except error.LockError:
906 except error.LockError:
907 copy = False
907 copy = False
908
908
909 if copy:
909 if copy:
910 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
910 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
911
911
912 destrootpath = urlutil.urllocalpath(dest)
912 destrootpath = urlutil.urllocalpath(dest)
913 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
913 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
914 localrepo.createrepository(
914 localrepo.createrepository(
915 ui,
915 ui,
916 destrootpath,
916 destrootpath,
917 requirements=dest_reqs,
917 requirements=dest_reqs,
918 )
918 )
919 destrepo = localrepo.makelocalrepository(ui, destrootpath)
919 destrepo = localrepo.makelocalrepository(ui, destrootpath)
920
920
921 destwlock = destrepo.wlock()
921 destwlock = destrepo.wlock()
922 destlock = destrepo.lock()
922 destlock = destrepo.lock()
923 from . import streamclone # avoid cycle
923 from . import streamclone # avoid cycle
924
924
925 streamclone.local_copy(srcrepo, destrepo)
925 streamclone.local_copy(srcrepo, destrepo)
926
926
927 # we need to re-init the repo after manually copying the data
927 # we need to re-init the repo after manually copying the data
928 # into it
928 # into it
929 destpeer = peer(srcrepo, peeropts, dest)
929 destpeer = peer(srcrepo, peeropts, dest)
930
930
931 # make the peer aware that is it already locked
931 # make the peer aware that is it already locked
932 #
932 #
933 # important:
933 # important:
934 #
934 #
935 # We still need to release that lock at the end of the function
935 # We still need to release that lock at the end of the function
936 destpeer.local()._lockref = weakref.ref(destlock)
936 destpeer.local()._lockref = weakref.ref(destlock)
937 destpeer.local()._wlockref = weakref.ref(destwlock)
937 destpeer.local()._wlockref = weakref.ref(destwlock)
938 # dirstate also needs to be copied because `_wlockref` has a reference
938 # dirstate also needs to be copied because `_wlockref` has a reference
939 # to it: this dirstate is saved to disk when the wlock is released
939 # to it: this dirstate is saved to disk when the wlock is released
940 destpeer.local().dirstate = destrepo.dirstate
940 destpeer.local().dirstate = destrepo.dirstate
941
941
942 srcrepo.hook(
942 srcrepo.hook(
943 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
943 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
944 )
944 )
945 else:
945 else:
946 try:
946 try:
947 # only pass ui when no srcrepo
947 # only pass ui when no srcrepo
948 destpeer = peer(
948 destpeer = peer(
949 srcrepo or ui,
949 srcrepo or ui,
950 peeropts,
950 peeropts,
951 dest,
951 dest,
952 create=True,
952 create=True,
953 createopts=createopts,
953 createopts=createopts,
954 )
954 )
955 except FileExistsError:
955 except FileExistsError:
956 cleandir = None
956 cleandir = None
957 raise error.Abort(_(b"destination '%s' already exists") % dest)
957 raise error.Abort(_(b"destination '%s' already exists") % dest)
958
958
959 if revs:
959 if revs:
960 if not srcpeer.capable(b'lookup'):
960 if not srcpeer.capable(b'lookup'):
961 raise error.Abort(
961 raise error.Abort(
962 _(
962 _(
963 b"src repository does not support "
963 b"src repository does not support "
964 b"revision lookup and so doesn't "
964 b"revision lookup and so doesn't "
965 b"support clone by revision"
965 b"support clone by revision"
966 )
966 )
967 )
967 )
968
968
969 # TODO this is batchable.
969 # TODO this is batchable.
970 remoterevs = []
970 remoterevs = []
971 for rev in revs:
971 for rev in revs:
972 with srcpeer.commandexecutor() as e:
972 with srcpeer.commandexecutor() as e:
973 remoterevs.append(
973 remoterevs.append(
974 e.callcommand(
974 e.callcommand(
975 b'lookup',
975 b'lookup',
976 {
976 {
977 b'key': rev,
977 b'key': rev,
978 },
978 },
979 ).result()
979 ).result()
980 )
980 )
981 revs = remoterevs
981 revs = remoterevs
982
982
983 checkout = revs[0]
983 checkout = revs[0]
984 else:
984 else:
985 revs = None
985 revs = None
986 local = destpeer.local()
986 local = destpeer.local()
987 if local:
987 if local:
988 if narrow:
988 if narrow:
989 with local.wlock(), local.lock():
989 with local.wlock(), local.lock():
990 local.setnarrowpats(storeincludepats, storeexcludepats)
990 local.setnarrowpats(storeincludepats, storeexcludepats)
991 narrowspec.copytoworkingcopy(local)
991 narrowspec.copytoworkingcopy(local)
992
992
993 u = urlutil.url(abspath)
993 u = urlutil.url(abspath)
994 defaulturl = bytes(u)
994 defaulturl = bytes(u)
995 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
995 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
996 if not stream:
996 if not stream:
997 if pull:
997 if pull:
998 stream = False
998 stream = False
999 else:
999 else:
1000 stream = None
1000 stream = None
1001 # internal config: ui.quietbookmarkmove
1001 # internal config: ui.quietbookmarkmove
1002 overrides = {(b'ui', b'quietbookmarkmove'): True}
1002 overrides = {(b'ui', b'quietbookmarkmove'): True}
1003 with local.ui.configoverride(overrides, b'clone'):
1003 with local.ui.configoverride(overrides, b'clone'):
1004 exchange.pull(
1004 exchange.pull(
1005 local,
1005 local,
1006 srcpeer,
1006 srcpeer,
1007 heads=revs,
1007 heads=revs,
1008 streamclonerequested=stream,
1008 streamclonerequested=stream,
1009 includepats=storeincludepats,
1009 includepats=storeincludepats,
1010 excludepats=storeexcludepats,
1010 excludepats=storeexcludepats,
1011 depth=depth,
1011 depth=depth,
1012 )
1012 )
1013 elif srcrepo:
1013 elif srcrepo:
1014 # TODO lift restriction once exchange.push() accepts narrow
1014 # TODO lift restriction once exchange.push() accepts narrow
1015 # push.
1015 # push.
1016 if narrow:
1016 if narrow:
1017 raise error.Abort(
1017 raise error.Abort(
1018 _(
1018 _(
1019 b'narrow clone not available for '
1019 b'narrow clone not available for '
1020 b'remote destinations'
1020 b'remote destinations'
1021 )
1021 )
1022 )
1022 )
1023
1023
1024 exchange.push(
1024 exchange.push(
1025 srcrepo,
1025 srcrepo,
1026 destpeer,
1026 destpeer,
1027 revs=revs,
1027 revs=revs,
1028 bookmarks=srcrepo._bookmarks.keys(),
1028 bookmarks=srcrepo._bookmarks.keys(),
1029 )
1029 )
1030 else:
1030 else:
1031 raise error.Abort(
1031 raise error.Abort(
1032 _(b"clone from remote to remote not supported")
1032 _(b"clone from remote to remote not supported")
1033 )
1033 )
1034
1034
1035 cleandir = None
1035 cleandir = None
1036
1036
1037 destrepo = destpeer.local()
1037 destrepo = destpeer.local()
1038 if destrepo:
1038 if destrepo:
1039 template = uimod.samplehgrcs[b'cloned']
1039 template = uimod.samplehgrcs[b'cloned']
1040 u = urlutil.url(abspath)
1040 u = urlutil.url(abspath)
1041 u.passwd = None
1041 u.passwd = None
1042 defaulturl = bytes(u)
1042 defaulturl = bytes(u)
1043 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1043 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1044 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1044 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1045
1045
1046 if ui.configbool(b'experimental', b'remotenames'):
1046 if ui.configbool(b'experimental', b'remotenames'):
1047 logexchange.pullremotenames(destrepo, srcpeer)
1047 logexchange.pullremotenames(destrepo, srcpeer)
1048
1048
1049 if update:
1049 if update:
1050 if update is not True:
1050 if update is not True:
1051 with srcpeer.commandexecutor() as e:
1051 with srcpeer.commandexecutor() as e:
1052 checkout = e.callcommand(
1052 checkout = e.callcommand(
1053 b'lookup',
1053 b'lookup',
1054 {
1054 {
1055 b'key': update,
1055 b'key': update,
1056 },
1056 },
1057 ).result()
1057 ).result()
1058
1058
1059 uprev = None
1059 uprev = None
1060 status = None
1060 status = None
1061 if checkout is not None:
1061 if checkout is not None:
1062 # Some extensions (at least hg-git and hg-subversion) have
1062 # Some extensions (at least hg-git and hg-subversion) have
1063 # a peer.lookup() implementation that returns a name instead
1063 # a peer.lookup() implementation that returns a name instead
1064 # of a nodeid. We work around it here until we've figured
1064 # of a nodeid. We work around it here until we've figured
1065 # out a better solution.
1065 # out a better solution.
1066 if len(checkout) == 20 and checkout in destrepo:
1066 if len(checkout) == 20 and checkout in destrepo:
1067 uprev = checkout
1067 uprev = checkout
1068 elif scmutil.isrevsymbol(destrepo, checkout):
1068 elif scmutil.isrevsymbol(destrepo, checkout):
1069 uprev = scmutil.revsymbol(destrepo, checkout).node()
1069 uprev = scmutil.revsymbol(destrepo, checkout).node()
1070 else:
1070 else:
1071 if update is not True:
1071 if update is not True:
1072 try:
1072 try:
1073 uprev = destrepo.lookup(update)
1073 uprev = destrepo.lookup(update)
1074 except error.RepoLookupError:
1074 except error.RepoLookupError:
1075 pass
1075 pass
1076 if uprev is None:
1076 if uprev is None:
1077 try:
1077 try:
1078 if destrepo._activebookmark:
1078 if destrepo._activebookmark:
1079 uprev = destrepo.lookup(destrepo._activebookmark)
1079 uprev = destrepo.lookup(destrepo._activebookmark)
1080 update = destrepo._activebookmark
1080 update = destrepo._activebookmark
1081 else:
1081 else:
1082 uprev = destrepo._bookmarks[b'@']
1082 uprev = destrepo._bookmarks[b'@']
1083 update = b'@'
1083 update = b'@'
1084 bn = destrepo[uprev].branch()
1084 bn = destrepo[uprev].branch()
1085 if bn == b'default':
1085 if bn == b'default':
1086 status = _(b"updating to bookmark %s\n" % update)
1086 status = _(b"updating to bookmark %s\n" % update)
1087 else:
1087 else:
1088 status = (
1088 status = (
1089 _(b"updating to bookmark %s on branch %s\n")
1089 _(b"updating to bookmark %s on branch %s\n")
1090 ) % (update, bn)
1090 ) % (update, bn)
1091 except KeyError:
1091 except KeyError:
1092 try:
1092 try:
1093 uprev = destrepo.branchtip(b'default')
1093 uprev = destrepo.branchtip(b'default')
1094 except error.RepoLookupError:
1094 except error.RepoLookupError:
1095 uprev = destrepo.lookup(b'tip')
1095 uprev = destrepo.lookup(b'tip')
1096 if not status:
1096 if not status:
1097 bn = destrepo[uprev].branch()
1097 bn = destrepo[uprev].branch()
1098 status = _(b"updating to branch %s\n") % bn
1098 status = _(b"updating to branch %s\n") % bn
1099 destrepo.ui.status(status)
1099 destrepo.ui.status(status)
1100 _update(destrepo, uprev)
1100 _update(destrepo, uprev)
1101 if update in destrepo._bookmarks:
1101 if update in destrepo._bookmarks:
1102 bookmarks.activate(destrepo, update)
1102 bookmarks.activate(destrepo, update)
1103 if destlock is not None:
1103 if destlock is not None:
1104 release(destlock)
1104 release(destlock)
1105 if destwlock is not None:
1105 if destwlock is not None:
1106 release(destlock)
1106 release(destlock)
1107 # here is a tiny windows were someone could end up writing the
1107 # here is a tiny windows were someone could end up writing the
1108 # repository before the cache are sure to be warm. This is "fine"
1108 # repository before the cache are sure to be warm. This is "fine"
1109 # as the only "bad" outcome would be some slowness. That potential
1109 # as the only "bad" outcome would be some slowness. That potential
1110 # slowness already affect reader.
1110 # slowness already affect reader.
1111 with destrepo.lock():
1111 with destrepo.lock():
1112 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1112 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1113 finally:
1113 finally:
1114 release(srclock, destlock, destwlock)
1114 release(srclock, destlock, destwlock)
1115 if cleandir is not None:
1115 if cleandir is not None:
1116 shutil.rmtree(cleandir, True)
1116 shutil.rmtree(cleandir, True)
1117 if srcpeer is not None:
1117 if srcpeer is not None:
1118 srcpeer.close()
1118 srcpeer.close()
1119 if destpeer and destpeer.local() is None:
1119 if destpeer and destpeer.local() is None:
1120 destpeer.close()
1120 destpeer.close()
1121 return srcpeer, destpeer
1121 return srcpeer, destpeer
1122
1122
1123
1123
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge ``stats`` on ``repo.ui``.

    When ``quietempty`` is true and the stats record no activity at all,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    template = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(template % counts)
1139
1139
1140
1140
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Deprecated: update the working directory to ``node``.

    When ``overwrite`` is set, local changes are clobbered; otherwise they
    are merged into the destination.

    Returns stats (see pydoc mercurial.merge.applyupdates).  New code
    should call merge.update() or merge.clean_update() directly.
    """
    # Emit the deprecation warning before delegating; behaviour is unchanged.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    update_args = {
        'branchmerge': False,
        'force': overwrite,
        'labels': [b'working copy', b'destination'],
        'updatecheck': updatecheck,
    }
    return mergemod._update(repo, node, **update_args)
1159
1159
1160
1160
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node``.

    Prints update statistics (suppressed for empty results when
    ``quietempty`` is set) and returns True when unresolved file merges
    remain, False otherwise.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return has_conflicts
1168
1168
1169
1169
# `clone()` uses a local name `update`, so keep this function reachable
# there under a non-conflicting module-level alias.
_update = update
1172
1172
1173
1173
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, clobbering changes.

    Always returns False: an overwriting update cannot leave unresolved
    file merges behind.
    """
    stats = mergemod.clean_update(repo[node])
    # sanity check: a clean update never produces conflicts
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False
1181
1181
1182
1182
# `updatetotally()` uses a local name `clean`, so keep this function
# reachable there under a non-conflicting module-level alias.
_clean = clean
1185
1185
# The accepted values for the ``updatecheck`` argument of
# ``updatetotally()`` and the ``commands.update.check`` config knob.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1192
1192
1193
1193
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # an explicit (non-None) argument must itself be valid
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # a bookmark to move and a name to activate afterwards)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out up front on a dirty working directory, then fall
                # through with no further checking
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is a non-bookmark name: drop any active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1272
1272
1273
1273
def merge(ctx, force=False, remind=True, labels=None):
    """Branch-merge the working directory with changeset ``ctx``.

    Prints merge statistics and either a resolve/abort hint (on
    conflicts) or, when ``remind`` is set, a commit reminder.
    Returns True when unresolved conflicts remain.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        hint = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(hint)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return has_conflicts
1295
1295
1296
1296
def abortmerge(ui, repo):
    """Abandon an in-progress merge and reset the working directory.

    Uses the local side recorded in the merge state when conflicts were
    stored; otherwise (conflict-free merge, no state on disk) falls back
    to the current parent ``.``.
    """
    ms = mergestatemod.mergestate.read(repo)
    # active merge state => conflicts were recorded; otherwise no state
    # was stored and '.' is the revision to return to
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean (overwriting) update cannot leave unresolved files
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1310
1310
1311
1311
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.

    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed; otherwise
    returns the result of ``subreporecurse()``.
    """
    # resolve the (single) pull source; multiple sources are rejected
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            peer_path = url = bytes(subpath)
        else:
            # relative subpath: join it onto the source URL, using the
            # path-separator convention matching the URL's locality
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may replace `other` (e.g. with a bundle repo)
        # and hands back the cleanup callable to run instead of close()
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1378
1378
1379
1379
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``.

    Returns 0 when incoming changes exist (here or in a subrepo),
    1 otherwise.
    """

    def subreporecurse():
        # recurse into subrepos when --subrepos is set, keeping the
        # lowest (best) exit code observed
        result = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for sub_path in sorted(wctx.substate):
                subrepo = wctx.sub(sub_path)
                result = min(result, subrepo.incoming(ui, source, opts))
        return result

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skip_merges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if skip_merges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1409
1409
1410
1410
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the changesets missing from every destination in ``dests``.

    Returns a pair ``(outgoing_revs, others)`` where ``outgoing_revs`` is
    the union of missing nodes across all destinations, sorted by local
    revision number, and ``others`` is the list of still-open peers (the
    caller is responsible for closing them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                # relative subpath: join onto the destination URL, using
                # the separator convention matching the URL's locality
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # the peer stays open; the caller closes everything in `others`
            others.append(other)
        except:  # re-raises
            # close the peer we failed on before propagating the error
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1453
1453
1454
1454
def _outgoing_recurse(ui, repo, dests, opts):
    """Run ``outgoing`` on every subrepo when --subrepos is set.

    Returns the minimum exit code across subrepos; 1 when recursion is
    disabled or no subrepo reported outgoing changes.
    """
    result = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for sub_path in sorted(wctx.substate):
            subrepo = wctx.sub(sub_path)
            result = min(result, subrepo.outgoing(ui, dests, opts))
    return result
1463
1463
1464
1464
def _outgoing_filter(repo, revs, opts):
    """Yield nodes from ``revs`` honouring the outgoing display options.

    Applies --newest-first (in-place reversal of ``revs``), --no-merges
    and --limit.
    """
    limit = logcmdutil.getlimit(opts)
    skip_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not skip_merges:
        # nothing to filter: pass every node straight through
        yield from revs
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if skip_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1486
1486
1487
1487
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the destination(s).

    Displays them either as a graph (--graph) or as a flat, filtered log,
    runs the outgoing hooks for every peer, and recurses into subrepos
    when requested.  Returns 0 when outgoing changes were found (here or
    in a subrepo), 1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                # flat display honours --limit/--no-merges/--newest-first
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # run outgoing hooks against every peer, even when nothing is missing
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # zero when outgoing changes were found here or below
    finally:
        # peers opened by _outgoing() are closed here in every case
        for oth in others:
            oth.close()
1519
1519
1520
1520
def verify(repo, level=None):
    """verify the consistency of a repository"""
    result = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for the -R case
    substate_path = util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    revs = repo.revs(b"filelog(%s)", substate_path)

    if not revs:
        return result

    repo.ui.status(_(b'checking subrepo links\n'))
    for rev in revs:
        ctx = repo[rev]
        try:
            for subpath in ctx.substate:
                try:
                    subrepo = ctx.sub(subpath, allowcreate=False)
                    result = subrepo.verify() or result
                except error.RepoError as err:
                    repo.ui.warn(b'%d: %s\n' % (rev, err))
        except Exception:
            # A corrupt .hgsubstate prevents even enumerating the subrepos.
            repo.ui.warn(
                _(b'.hgsubstate is corrupt in revision %s\n')
                % short(ctx.node())
            )

    return result
1553
1553
1554
1554
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # Looks like a repository: start from its base ui so repo-specific
        # configuration is dropped, then read options from the repo's ui.
        dst = src.baseui.copy()
        src = src.ui
    else:
        # Assume a plain global ui object: keep all global options.
        dst = src.copy()

    # ssh-specific options: explicit command-line opts win over config.
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # bundle-specific options
    reporoot = src.config(b'bundle', b'mainreporoot')
    if reporoot:
        dst.setconfig(b'bundle', b'mainreporoot', reporoot, b'copied')

    # copy selected local settings to the remote ui
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1583
1583
1584
1584
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Pairs are (repo attribute holding the base path, file name).
foi = [
    (b'spath', b'00changelog.i'),
    # ! phase can change content at the same size
    (b'spath', b'phaseroots'),
    (b'spath', b'obsstore'),
    # ! bookmark can change content at the same size
    (b'path', b'bookmarks'),
]
1594
1594
1595
1595
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # Change detection compares mtimes and sizes of well-known files.
        # This is imprecise: mtimes suffer from clock skew and coarse
        # filesystem resolution, and content can change at constant size.
        current_state, current_mtime = self._repostate()
        if current_state == self._state:
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = current_state
        self.mtime = current_mtime

        return self._repo, True

    def _repostate(self):
        # Returns (tuple of (mtime, size) per file of interest, newest mtime).
        entries = []
        newest = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            try:
                st = os.stat(os.path.join(prefix, fname))
            except OSError:
                # Fall back to the containing directory when the file is
                # absent so a state entry is still recorded.
                st = os.stat(prefix)
            entries.append((st[stat.ST_MTIME], st.st_size))
            newest = max(newest, st[stat.ST_MTIME])

        return tuple(entries), newest

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should
        be completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        clone = cachedlocalrepo(repo)
        clone._state = self._state
        clone.mtime = self.mtime
        return clone
@@ -1,642 +1,643 b''
1 # httppeer.py - HTTP repository proxy classes for mercurial
1 # httppeer.py - HTTP repository proxy classes for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import errno
10 import errno
11 import io
11 import io
12 import os
12 import os
13 import socket
13 import socket
14 import struct
14 import struct
15
15
16 from concurrent import futures
16 from concurrent import futures
17 from .i18n import _
17 from .i18n import _
18 from .pycompat import getattr
18 from .pycompat import getattr
19 from . import (
19 from . import (
20 bundle2,
20 bundle2,
21 error,
21 error,
22 httpconnection,
22 httpconnection,
23 pycompat,
23 pycompat,
24 statichttprepo,
24 statichttprepo,
25 url as urlmod,
25 url as urlmod,
26 util,
26 util,
27 wireprotov1peer,
27 wireprotov1peer,
28 )
28 )
29 from .utils import urlutil
29 from .utils import urlutil
30
30
31 httplib = util.httplib
31 httplib = util.httplib
32 urlerr = util.urlerr
32 urlerr = util.urlerr
33 urlreq = util.urlreq
33 urlreq = util.urlreq
34
34
35
35
def encodevalueinheaders(value, header, limit):
    """Encode a string value into multiple HTTP headers.

    ``value`` will be encoded into 1 or more HTTP headers with the names
    ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
    name + value will be at most ``limit`` bytes long.

    Returns an iterable of 2-tuples consisting of header names and
    values as native strings.
    """
    # HTTP headers are ASCII and must be native strings on Python 3, while
    # the incoming arguments are always bytes.
    fmt = pycompat.strurl(header) + r'-%s'
    # Reserve room for a 3-digit header suffix plus the ': ' separator and
    # trailing CRLF. Note: it is *NOT* a bug that the last operand is a
    # bytestring — only its encoded length matters here.
    chunklen = limit - len(fmt % '000') - len(b': \r\n')

    return [
        (fmt % str(idx), pycompat.strurl(value[start : start + chunklen]))
        for idx, start in enumerate(range(0, len(value), chunklen), 1)
    ]
63
63
64
64
class _multifile:
    """Present several file-like objects as one concatenated readable file.

    Every wrapped object must expose a ``length`` attribute so the combined
    length can be reported (e.g. for a Content-Length header).
    """

    def __init__(self, *fileobjs):
        for f in fileobjs:
            if not util.safehasattr(f, b'length'):
                raise ValueError(
                    b'_multifile only supports file objects that '
                    b'have a length but this one does not:',
                    type(f),
                    f,
                )
        self._fileobjs = fileobjs
        # Index of the file object currently being read from.
        self._index = 0

    @property
    def length(self):
        """Total number of bytes across all wrapped file objects."""
        return sum(f.length for f in self._fileobjs)

    def read(self, amt=None):
        """Read up to ``amt`` bytes, crossing file boundaries as needed.

        ``amt`` of None or <= 0 reads everything that remains.
        """
        # Fix: the default ``amt=None`` previously reached ``amt <= 0``,
        # which raises TypeError on Python 3 (None is not orderable).
        # Treat None explicitly as "read everything".
        if amt is None or amt <= 0:
            return b''.join(f.read() for f in self._fileobjs)
        parts = []
        while amt and self._index < len(self._fileobjs):
            parts.append(self._fileobjs[self._index].read(amt))
            got = len(parts[-1])
            if got < amt:
                # Current file exhausted; continue with the next one.
                self._index += 1
            amt -= got
        return b''.join(parts)

    def seek(self, offset, whence=os.SEEK_SET):
        """Rewind to the start; only seek(0, SEEK_SET) is supported."""
        if whence != os.SEEK_SET:
            raise NotImplementedError(
                b'_multifile does not support anything other'
                b' than os.SEEK_SET for whence on seek()'
            )
        if offset != 0:
            raise NotImplementedError(
                b'_multifile only supports seeking to start, but that '
                b'could be fixed if you need it'
            )
        for f in self._fileobjs:
            f.seek(0)
        self._index = 0
108
108
109
109
def makev1commandrequest(
    ui, requestbuilder, caps, capablefn, repobaseurl, cmd, args
):
    """Make an HTTP request to run a command for a version 1 client.

    ``caps`` is a set of known server capabilities. The value may be
    None if capabilities are not yet known.

    ``capablefn`` is a function to evaluate a capability.

    ``cmd``, ``args``, and ``data`` define the command, its arguments, and
    raw data to pass to it.

    Returns a ``(request, url, querystring)`` tuple.
    """
    if cmd == b'pushkey':
        # NOTE(review): presumably this forces pushkey to be sent with a
        # (possibly empty) body — confirm against the server-side handler.
        args[b'data'] = b''
    data = args.pop(b'data', None)
    headers = args.pop(b'headers', {})

    ui.debug(b"sending %s command\n" % cmd)
    q = [(b'cmd', cmd)]
    headersize = 0
    # Important: don't use self.capable() here or else you end up
    # with infinite recursion when trying to look up capabilities
    # for the first time.
    postargsok = caps is not None and b'httppostargs' in caps

    # Send arguments via POST.
    if postargsok and args:
        strargs = urlreq.urlencode(sorted(args.items()))
        if not data:
            data = strargs
        else:
            # Prepend the encoded arguments to the existing payload by
            # concatenating both as a single file-like object.
            if isinstance(data, bytes):
                i = io.BytesIO(data)
                i.length = len(data)
                data = i
            argsio = io.BytesIO(strargs)
            argsio.length = len(strargs)
            data = _multifile(argsio, data)
        # Tell the server how many leading bytes are arguments.
        headers['X-HgArgs-Post'] = len(strargs)
    elif args:
        # Calling self.capable() can infinite loop if we are calling
        # "capabilities". But that command should never accept wire
        # protocol arguments. So this should never happen.
        assert cmd != b'capabilities'
        httpheader = capablefn(b'httpheader')
        if httpheader:
            headersize = int(httpheader.split(b',', 1)[0])

        # Send arguments via HTTP headers.
        if headersize > 0:
            # The headers can typically carry more data than the URL.
            encoded_args = urlreq.urlencode(sorted(args.items()))
            for header, value in encodevalueinheaders(
                encoded_args, b'X-HgArg', headersize
            ):
                headers[header] = value
        # Send arguments via query string (Mercurial <1.9).
        else:
            q += sorted(args.items())

    qs = b'?%s' % urlreq.urlencode(q)
    cu = b"%s%s" % (repobaseurl, qs)
    size = 0
    if util.safehasattr(data, b'length'):
        size = data.length
    elif data is not None:
        size = len(data)
    if data is not None and 'Content-Type' not in headers:
        headers['Content-Type'] = 'application/mercurial-0.1'

    # Tell the server we accept application/mercurial-0.2 and multiple
    # compression formats if the server is capable of emitting those
    # payloads.
    # Note: Keep this set empty by default, as client advertisement of
    # protocol parameters should only occur after the handshake.
    protoparams = set()

    mediatypes = set()
    if caps is not None:
        mt = capablefn(b'httpmediatype')
        if mt:
            protoparams.add(b'0.1')
            mediatypes = set(mt.split(b','))

        protoparams.add(b'partial-pull')

    if b'0.2tx' in mediatypes:
        protoparams.add(b'0.2')

    if b'0.2tx' in mediatypes and capablefn(b'compression'):
        # We /could/ compare supported compression formats and prune
        # non-mutually supported or error if nothing is mutually supported.
        # For now, send the full list to the server and have it error.
        comps = [
            e.wireprotosupport().name
            for e in util.compengines.supportedwireengines(util.CLIENTROLE)
        ]
        protoparams.add(b'comp=%s' % b','.join(comps))

    if protoparams:
        protoheaders = encodevalueinheaders(
            b' '.join(sorted(protoparams)), b'X-HgProto', headersize or 1024
        )
        for header, value in protoheaders:
            headers[header] = value

    # Advertise which request headers vary the response so caches behave.
    varyheaders = []
    for header in headers:
        if header.lower().startswith('x-hg'):
            varyheaders.append(header)

    if varyheaders:
        headers['Vary'] = ','.join(sorted(varyheaders))

    req = requestbuilder(pycompat.strurl(cu), data, headers)

    if data is not None:
        ui.debug(b"sending %d bytes\n" % size)
        # Set Content-Length directly so redirects don't drop it.
        req.add_unredirected_header('Content-Length', '%d' % size)

    return req, cu, qs
232
232
233
233
def sendrequest(ui, opener, req):
    """Send a prepared HTTP request.

    Returns the response object.

    When the ``devel.debug.peer-request`` config knob is set, request
    method/URL, non-argument headers, argument sizes and timing are logged
    through ``ui.debug``.
    """
    dbg = ui.debug
    # ``line`` is only bound when this debug flag is set; the ``finally``
    # block below re-checks the same condition before using it.
    if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
        line = b'devel-peer-request: %s\n'
        dbg(
            line
            % b'%s %s'
            % (
                pycompat.bytesurl(req.get_method()),
                pycompat.bytesurl(req.get_full_url()),
            )
        )
        hgargssize = None

        for header, value in sorted(req.header_items()):
            header = pycompat.bytesurl(header)
            value = pycompat.bytesurl(value)
            if header.startswith(b'X-hgarg-'):
                # Argument headers are summed up rather than printed.
                if hgargssize is None:
                    hgargssize = 0
                hgargssize += len(value)
            else:
                dbg(line % b'  %s %s' % (header, value))

        if hgargssize is not None:
            dbg(
                line
                % b'  %d bytes of commands arguments in headers'
                % hgargssize
            )
        data = req.data
        if data is not None:
            length = getattr(data, 'length', None)
            if length is None:
                length = len(data)
            dbg(line % b'  %d bytes of data' % length)

    start = util.timer()

    res = None
    try:
        res = opener.open(req)
    except urlerr.httperror as inst:
        if inst.code == 401:
            raise error.Abort(_(b'authorization failed'))
        raise
    except httplib.HTTPException as inst:
        ui.debug(
            b'http error requesting %s\n'
            % urlutil.hidepassword(req.get_full_url())
        )
        ui.traceback()
        raise IOError(None, inst)
    finally:
        if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
            # res stays None when opener.open() raised; report -1 then.
            code = res.code if res else -1
            dbg(
                line
                % b'  finished in %.4f seconds (%d)'
                % (util.timer() - start, code)
            )

    # Insert error handlers for common I/O failures.
    urlmod.wrapresponse(res)

    return res
304
304
305
305
class RedirectedRepoError(error.RepoError):
    """A RepoError for responses served from a redirected URL.

    ``respurl`` records the URL the response actually came from.
    """

    def __init__(self, msg, respurl):
        super().__init__(msg)
        self.respurl = respurl
310
310
311
311
def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible):
    """Validate and decode a version 1 wire protocol HTTP response.

    Checks the Content-Type, handles out-of-band errors and unexpected
    redirects, and wraps ``resp`` in a decompression reader when needed.

    Returns a ``(respurl, proto, resp)`` tuple.
    """
    # record the url we got redirected to
    redirected = False
    respurl = pycompat.bytesurl(resp.geturl())
    if respurl.endswith(qs):
        respurl = respurl[: -len(qs)]
        qsdropped = False
    else:
        # The redirect target lost our query string.
        qsdropped = True

    if baseurl.rstrip(b'/') != respurl.rstrip(b'/'):
        redirected = True
        if not ui.quiet:
            ui.warn(_(b'real URL is %s\n') % respurl)

    try:
        proto = pycompat.bytesurl(resp.getheader('content-type', ''))
    except AttributeError:
        # Some response objects expose headers via a mapping instead.
        proto = pycompat.bytesurl(resp.headers.get('content-type', ''))

    safeurl = urlutil.hidepassword(baseurl)
    if proto.startswith(b'application/hg-error'):
        raise error.OutOfBandError(resp.read())

    # Pre 1.0 versions of Mercurial used text/plain and
    # application/hg-changegroup. We don't support such old servers.
    if not proto.startswith(b'application/mercurial-'):
        ui.debug(b"requested URL: '%s'\n" % urlutil.hidepassword(requrl))
        msg = _(
            b"'%s' does not appear to be an hg repository:\n"
            b"---%%<--- (%s)\n%s\n---%%<---\n"
        ) % (safeurl, proto or b'no content-type', resp.read(1024))

        # Some servers may strip the query string from the redirect. We
        # raise a special error type so callers can react to this specially.
        if redirected and qsdropped:
            raise RedirectedRepoError(msg, respurl)
        else:
            raise error.RepoError(msg)

    try:
        # e.g. b'application/mercurial-0.2' -> subtype b'0.2'.
        subtype = proto.split(b'-', 1)[1]

        version_info = tuple([int(n) for n in subtype.split(b'.')])
    except ValueError:
        raise error.RepoError(
            _(b"'%s' sent a broken Content-Type header (%s)") % (safeurl, proto)
        )

    # TODO consider switching to a decompression reader that uses
    # generators.
    if version_info == (0, 1):
        if compressible:
            resp = util.compengines[b'zlib'].decompressorreader(resp)

    elif version_info == (0, 2):
        # application/mercurial-0.2 always identifies the compression
        # engine in the payload header.
        elen = struct.unpack(b'B', util.readexactly(resp, 1))[0]
        ename = util.readexactly(resp, elen)
        engine = util.compengines.forwiretype(ename)

        resp = engine.decompressorreader(resp)
    else:
        raise error.RepoError(
            _(b"'%s' uses newer protocol %s") % (safeurl, subtype)
        )

    return respurl, proto, resp
381
381
382
382
383 class httppeer(wireprotov1peer.wirepeer):
383 class httppeer(wireprotov1peer.wirepeer):
384 def __init__(self, ui, path, url, opener, requestbuilder, caps):
384 def __init__(self, ui, path, url, opener, requestbuilder, caps):
385 super().__init__(ui)
385 super().__init__(ui)
386 self._path = path
386 self._path = path
387 self._url = url
387 self._url = url
388 self._caps = caps
388 self._caps = caps
389 self.limitedarguments = caps is not None and b'httppostargs' not in caps
389 self.limitedarguments = caps is not None and b'httppostargs' not in caps
390 self._urlopener = opener
390 self._urlopener = opener
391 self._requestbuilder = requestbuilder
391 self._requestbuilder = requestbuilder
392
392
393 def __del__(self):
393 def __del__(self):
394 for h in self._urlopener.handlers:
394 for h in self._urlopener.handlers:
395 h.close()
395 h.close()
396 getattr(h, "close_all", lambda: None)()
396 getattr(h, "close_all", lambda: None)()
397
397
398 # Begin of ipeerconnection interface.
398 # Begin of ipeerconnection interface.
399
399
400 def url(self):
400 def url(self):
401 return self._path
401 return self._path
402
402
403 def local(self):
403 def local(self):
404 return None
404 return None
405
405
406 def canpush(self):
406 def canpush(self):
407 return True
407 return True
408
408
409 def close(self):
409 def close(self):
410 try:
410 try:
411 reqs, sent, recv = (
411 reqs, sent, recv = (
412 self._urlopener.requestscount,
412 self._urlopener.requestscount,
413 self._urlopener.sentbytescount,
413 self._urlopener.sentbytescount,
414 self._urlopener.receivedbytescount,
414 self._urlopener.receivedbytescount,
415 )
415 )
416 except AttributeError:
416 except AttributeError:
417 return
417 return
418 self.ui.note(
418 self.ui.note(
419 _(
419 _(
420 b'(sent %d HTTP requests and %d bytes; '
420 b'(sent %d HTTP requests and %d bytes; '
421 b'received %d bytes in responses)\n'
421 b'received %d bytes in responses)\n'
422 )
422 )
423 % (reqs, sent, recv)
423 % (reqs, sent, recv)
424 )
424 )
425
425
426 # End of ipeerconnection interface.
426 # End of ipeerconnection interface.
427
427
428 # Begin of ipeercommands interface.
428 # Begin of ipeercommands interface.
429
429
430 def capabilities(self):
430 def capabilities(self):
431 return self._caps
431 return self._caps
432
432
433 # End of ipeercommands interface.
433 # End of ipeercommands interface.
434
434
435 def _callstream(self, cmd, _compressible=False, **args):
435 def _callstream(self, cmd, _compressible=False, **args):
436 args = pycompat.byteskwargs(args)
436 args = pycompat.byteskwargs(args)
437
437
438 req, cu, qs = makev1commandrequest(
438 req, cu, qs = makev1commandrequest(
439 self.ui,
439 self.ui,
440 self._requestbuilder,
440 self._requestbuilder,
441 self._caps,
441 self._caps,
442 self.capable,
442 self.capable,
443 self._url,
443 self._url,
444 cmd,
444 cmd,
445 args,
445 args,
446 )
446 )
447
447
448 resp = sendrequest(self.ui, self._urlopener, req)
448 resp = sendrequest(self.ui, self._urlopener, req)
449
449
450 self._url, ct, resp = parsev1commandresponse(
450 self._url, ct, resp = parsev1commandresponse(
451 self.ui, self._url, cu, qs, resp, _compressible
451 self.ui, self._url, cu, qs, resp, _compressible
452 )
452 )
453
453
454 return resp
454 return resp
455
455
456 def _call(self, cmd, **args):
456 def _call(self, cmd, **args):
457 fp = self._callstream(cmd, **args)
457 fp = self._callstream(cmd, **args)
458 try:
458 try:
459 return fp.read()
459 return fp.read()
460 finally:
460 finally:
461 # if using keepalive, allow connection to be reused
461 # if using keepalive, allow connection to be reused
462 fp.close()
462 fp.close()
463
463
464 def _callpush(self, cmd, cg, **args):
464 def _callpush(self, cmd, cg, **args):
465 # have to stream bundle to a temp file because we do not have
465 # have to stream bundle to a temp file because we do not have
466 # http 1.1 chunked transfer.
466 # http 1.1 chunked transfer.
467
467
468 types = self.capable(b'unbundle')
468 types = self.capable(b'unbundle')
469 try:
469 try:
470 types = types.split(b',')
470 types = types.split(b',')
471 except AttributeError:
471 except AttributeError:
472 # servers older than d1b16a746db6 will send 'unbundle' as a
472 # servers older than d1b16a746db6 will send 'unbundle' as a
473 # boolean capability. They only support headerless/uncompressed
473 # boolean capability. They only support headerless/uncompressed
474 # bundles.
474 # bundles.
475 types = [b""]
475 types = [b""]
476 for x in types:
476 for x in types:
477 if x in bundle2.bundletypes:
477 if x in bundle2.bundletypes:
478 type = x
478 type = x
479 break
479 break
480
480
481 tempname = bundle2.writebundle(self.ui, cg, None, type)
481 tempname = bundle2.writebundle(self.ui, cg, None, type)
482 fp = httpconnection.httpsendfile(self.ui, tempname, b"rb")
482 fp = httpconnection.httpsendfile(self.ui, tempname, b"rb")
483 headers = {'Content-Type': 'application/mercurial-0.1'}
483 headers = {'Content-Type': 'application/mercurial-0.1'}
484
484
485 try:
485 try:
486 r = self._call(cmd, data=fp, headers=headers, **args)
486 r = self._call(cmd, data=fp, headers=headers, **args)
487 vals = r.split(b'\n', 1)
487 vals = r.split(b'\n', 1)
488 if len(vals) < 2:
488 if len(vals) < 2:
489 raise error.ResponseError(_(b"unexpected response:"), r)
489 raise error.ResponseError(_(b"unexpected response:"), r)
490 return vals
490 return vals
491 except urlerr.httperror:
491 except urlerr.httperror:
492 # Catch and re-raise these so we don't try and treat them
492 # Catch and re-raise these so we don't try and treat them
493 # like generic socket errors. They lack any values in
493 # like generic socket errors. They lack any values in
494 # .args on Python 3 which breaks our socket.error block.
494 # .args on Python 3 which breaks our socket.error block.
495 raise
495 raise
496 except socket.error as err:
496 except socket.error as err:
497 if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
497 if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
498 raise error.Abort(_(b'push failed: %s') % err.args[1])
498 raise error.Abort(_(b'push failed: %s') % err.args[1])
499 raise error.Abort(err.args[1])
499 raise error.Abort(err.args[1])
500 finally:
500 finally:
501 fp.close()
501 fp.close()
502 os.unlink(tempname)
502 os.unlink(tempname)
503
503
504 def _calltwowaystream(self, cmd, fp, **args):
504 def _calltwowaystream(self, cmd, fp, **args):
505 filename = None
505 filename = None
506 try:
506 try:
507 # dump bundle to disk
507 # dump bundle to disk
508 fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
508 fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
509 with os.fdopen(fd, "wb") as fh:
509 with os.fdopen(fd, "wb") as fh:
510 d = fp.read(4096)
510 d = fp.read(4096)
511 while d:
511 while d:
512 fh.write(d)
512 fh.write(d)
513 d = fp.read(4096)
513 d = fp.read(4096)
514 # start http push
514 # start http push
515 with httpconnection.httpsendfile(self.ui, filename, b"rb") as fp_:
515 with httpconnection.httpsendfile(self.ui, filename, b"rb") as fp_:
516 headers = {'Content-Type': 'application/mercurial-0.1'}
516 headers = {'Content-Type': 'application/mercurial-0.1'}
517 return self._callstream(cmd, data=fp_, headers=headers, **args)
517 return self._callstream(cmd, data=fp_, headers=headers, **args)
518 finally:
518 finally:
519 if filename is not None:
519 if filename is not None:
520 os.unlink(filename)
520 os.unlink(filename)
521
521
522 def _callcompressable(self, cmd, **args):
522 def _callcompressable(self, cmd, **args):
523 return self._callstream(cmd, _compressible=True, **args)
523 return self._callstream(cmd, _compressible=True, **args)
524
524
525 def _abort(self, exception):
525 def _abort(self, exception):
526 raise exception
526 raise exception
527
527
528
528
529 class queuedcommandfuture(futures.Future):
529 class queuedcommandfuture(futures.Future):
530 """Wraps result() on command futures to trigger submission on call."""
530 """Wraps result() on command futures to trigger submission on call."""
531
531
532 def result(self, timeout=None):
532 def result(self, timeout=None):
533 if self.done():
533 if self.done():
534 return futures.Future.result(self, timeout)
534 return futures.Future.result(self, timeout)
535
535
536 self._peerexecutor.sendcommands()
536 self._peerexecutor.sendcommands()
537
537
538 # sendcommands() will restore the original __class__ and self.result
538 # sendcommands() will restore the original __class__ and self.result
539 # will resolve to Future.result.
539 # will resolve to Future.result.
540 return self.result(timeout)
540 return self.result(timeout)
541
541
542
542
543 def performhandshake(ui, url, opener, requestbuilder):
543 def performhandshake(ui, url, opener, requestbuilder):
544 # The handshake is a request to the capabilities command.
544 # The handshake is a request to the capabilities command.
545
545
546 caps = None
546 caps = None
547
547
548 def capable(x):
548 def capable(x):
549 raise error.ProgrammingError(b'should not be called')
549 raise error.ProgrammingError(b'should not be called')
550
550
551 args = {}
551 args = {}
552
552
553 req, requrl, qs = makev1commandrequest(
553 req, requrl, qs = makev1commandrequest(
554 ui, requestbuilder, caps, capable, url, b'capabilities', args
554 ui, requestbuilder, caps, capable, url, b'capabilities', args
555 )
555 )
556 resp = sendrequest(ui, opener, req)
556 resp = sendrequest(ui, opener, req)
557
557
558 # The server may redirect us to the repo root, stripping the
558 # The server may redirect us to the repo root, stripping the
559 # ?cmd=capabilities query string from the URL. The server would likely
559 # ?cmd=capabilities query string from the URL. The server would likely
560 # return HTML in this case and ``parsev1commandresponse()`` would raise.
560 # return HTML in this case and ``parsev1commandresponse()`` would raise.
561 # We catch this special case and re-issue the capabilities request against
561 # We catch this special case and re-issue the capabilities request against
562 # the new URL.
562 # the new URL.
563 #
563 #
564 # We should ideally not do this, as a redirect that drops the query
564 # We should ideally not do this, as a redirect that drops the query
565 # string from the URL is arguably a server bug. (Garbage in, garbage out).
565 # string from the URL is arguably a server bug. (Garbage in, garbage out).
566 # However, Mercurial clients for several years appeared to handle this
566 # However, Mercurial clients for several years appeared to handle this
567 # issue without behavior degradation. And according to issue 5860, it may
567 # issue without behavior degradation. And according to issue 5860, it may
568 # be a longstanding bug in some server implementations. So we allow a
568 # be a longstanding bug in some server implementations. So we allow a
569 # redirect that drops the query string to "just work."
569 # redirect that drops the query string to "just work."
570 try:
570 try:
571 respurl, ct, resp = parsev1commandresponse(
571 respurl, ct, resp = parsev1commandresponse(
572 ui, url, requrl, qs, resp, compressible=False
572 ui, url, requrl, qs, resp, compressible=False
573 )
573 )
574 except RedirectedRepoError as e:
574 except RedirectedRepoError as e:
575 req, requrl, qs = makev1commandrequest(
575 req, requrl, qs = makev1commandrequest(
576 ui, requestbuilder, caps, capable, e.respurl, b'capabilities', args
576 ui, requestbuilder, caps, capable, e.respurl, b'capabilities', args
577 )
577 )
578 resp = sendrequest(ui, opener, req)
578 resp = sendrequest(ui, opener, req)
579 respurl, ct, resp = parsev1commandresponse(
579 respurl, ct, resp = parsev1commandresponse(
580 ui, url, requrl, qs, resp, compressible=False
580 ui, url, requrl, qs, resp, compressible=False
581 )
581 )
582
582
583 try:
583 try:
584 rawdata = resp.read()
584 rawdata = resp.read()
585 finally:
585 finally:
586 resp.close()
586 resp.close()
587
587
588 if not ct.startswith(b'application/mercurial-'):
588 if not ct.startswith(b'application/mercurial-'):
589 raise error.ProgrammingError(b'unexpected content-type: %s' % ct)
589 raise error.ProgrammingError(b'unexpected content-type: %s' % ct)
590
590
591 info = {b'v1capabilities': set(rawdata.split())}
591 info = {b'v1capabilities': set(rawdata.split())}
592
592
593 return respurl, info
593 return respurl, info
594
594
595
595
596 def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
596 def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
597 """Construct an appropriate HTTP peer instance.
597 """Construct an appropriate HTTP peer instance.
598
598
599 ``opener`` is an ``url.opener`` that should be used to establish
599 ``opener`` is an ``url.opener`` that should be used to establish
600 connections, perform HTTP requests.
600 connections, perform HTTP requests.
601
601
602 ``requestbuilder`` is the type used for constructing HTTP requests.
602 ``requestbuilder`` is the type used for constructing HTTP requests.
603 It exists as an argument so extensions can override the default.
603 It exists as an argument so extensions can override the default.
604 """
604 """
605 u = urlutil.url(path)
605 u = urlutil.url(path)
606 if u.query or u.fragment:
606 if u.query or u.fragment:
607 raise error.Abort(
607 raise error.Abort(
608 _(b'unsupported URL component: "%s"') % (u.query or u.fragment)
608 _(b'unsupported URL component: "%s"') % (u.query or u.fragment)
609 )
609 )
610
610
611 # urllib cannot handle URLs with embedded user or passwd.
611 # urllib cannot handle URLs with embedded user or passwd.
612 url, authinfo = u.authinfo()
612 url, authinfo = u.authinfo()
613 ui.debug(b'using %s\n' % url)
613 ui.debug(b'using %s\n' % url)
614
614
615 opener = opener or urlmod.opener(ui, authinfo)
615 opener = opener or urlmod.opener(ui, authinfo)
616
616
617 respurl, info = performhandshake(ui, url, opener, requestbuilder)
617 respurl, info = performhandshake(ui, url, opener, requestbuilder)
618
618
619 return httppeer(
619 return httppeer(
620 ui, path, respurl, opener, requestbuilder, info[b'v1capabilities']
620 ui, path, respurl, opener, requestbuilder, info[b'v1capabilities']
621 )
621 )
622
622
623
623
624 def make_peer(ui, path, create, intents=None, createopts=None):
624 def make_peer(ui, path, create, intents=None, createopts=None):
625 if create:
625 if create:
626 raise error.Abort(_(b'cannot create new http repository'))
626 raise error.Abort(_(b'cannot create new http repository'))
627 path = path.loc
627 try:
628 try:
628 if path.startswith(b'https:') and not urlmod.has_https:
629 if path.startswith(b'https:') and not urlmod.has_https:
629 raise error.Abort(
630 raise error.Abort(
630 _(b'Python support for SSL and HTTPS is not installed')
631 _(b'Python support for SSL and HTTPS is not installed')
631 )
632 )
632
633
633 inst = makepeer(ui, path)
634 inst = makepeer(ui, path)
634
635
635 return inst
636 return inst
636 except error.RepoError as httpexception:
637 except error.RepoError as httpexception:
637 try:
638 try:
638 r = statichttprepo.make_peer(ui, b"static-" + path, create)
639 r = statichttprepo.make_peer(ui, b"static-" + path, create)
639 ui.note(_(b'(falling back to static-http)\n'))
640 ui.note(_(b'(falling back to static-http)\n'))
640 return r
641 return r
641 except error.RepoError:
642 except error.RepoError:
642 raise httpexception # use the original http RepoError instead
643 raise httpexception # use the original http RepoError instead
@@ -1,675 +1,676 b''
1 # sshpeer.py - ssh repository proxy class for mercurial
1 # sshpeer.py - ssh repository proxy class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import re
9 import re
10 import uuid
10 import uuid
11
11
12 from .i18n import _
12 from .i18n import _
13 from .pycompat import getattr
13 from .pycompat import getattr
14 from . import (
14 from . import (
15 error,
15 error,
16 pycompat,
16 pycompat,
17 util,
17 util,
18 wireprototypes,
18 wireprototypes,
19 wireprotov1peer,
19 wireprotov1peer,
20 wireprotov1server,
20 wireprotov1server,
21 )
21 )
22 from .utils import (
22 from .utils import (
23 procutil,
23 procutil,
24 stringutil,
24 stringutil,
25 urlutil,
25 urlutil,
26 )
26 )
27
27
28
28
29 def _serverquote(s):
29 def _serverquote(s):
30 """quote a string for the remote shell ... which we assume is sh"""
30 """quote a string for the remote shell ... which we assume is sh"""
31 if not s:
31 if not s:
32 return s
32 return s
33 if re.match(b'[a-zA-Z0-9@%_+=:,./-]*$', s):
33 if re.match(b'[a-zA-Z0-9@%_+=:,./-]*$', s):
34 return s
34 return s
35 return b"'%s'" % s.replace(b"'", b"'\\''")
35 return b"'%s'" % s.replace(b"'", b"'\\''")
36
36
37
37
38 def _forwardoutput(ui, pipe, warn=False):
38 def _forwardoutput(ui, pipe, warn=False):
39 """display all data currently available on pipe as remote output.
39 """display all data currently available on pipe as remote output.
40
40
41 This is non blocking."""
41 This is non blocking."""
42 if pipe and not pipe.closed:
42 if pipe and not pipe.closed:
43 s = procutil.readpipe(pipe)
43 s = procutil.readpipe(pipe)
44 if s:
44 if s:
45 display = ui.warn if warn else ui.status
45 display = ui.warn if warn else ui.status
46 for l in s.splitlines():
46 for l in s.splitlines():
47 display(_(b"remote: "), l, b'\n')
47 display(_(b"remote: "), l, b'\n')
48
48
49
49
50 class doublepipe:
50 class doublepipe:
51 """Operate a side-channel pipe in addition of a main one
51 """Operate a side-channel pipe in addition of a main one
52
52
53 The side-channel pipe contains server output to be forwarded to the user
53 The side-channel pipe contains server output to be forwarded to the user
54 input. The double pipe will behave as the "main" pipe, but will ensure the
54 input. The double pipe will behave as the "main" pipe, but will ensure the
55 content of the "side" pipe is properly processed while we wait for blocking
55 content of the "side" pipe is properly processed while we wait for blocking
56 call on the "main" pipe.
56 call on the "main" pipe.
57
57
58 If large amounts of data are read from "main", the forward will cease after
58 If large amounts of data are read from "main", the forward will cease after
59 the first bytes start to appear. This simplifies the implementation
59 the first bytes start to appear. This simplifies the implementation
60 without affecting actual output of sshpeer too much as we rarely issue
60 without affecting actual output of sshpeer too much as we rarely issue
61 large read for data not yet emitted by the server.
61 large read for data not yet emitted by the server.
62
62
63 The main pipe is expected to be a 'bufferedinputpipe' from the util module
63 The main pipe is expected to be a 'bufferedinputpipe' from the util module
64 that handle all the os specific bits. This class lives in this module
64 that handle all the os specific bits. This class lives in this module
65 because it focus on behavior specific to the ssh protocol."""
65 because it focus on behavior specific to the ssh protocol."""
66
66
67 def __init__(self, ui, main, side):
67 def __init__(self, ui, main, side):
68 self._ui = ui
68 self._ui = ui
69 self._main = main
69 self._main = main
70 self._side = side
70 self._side = side
71
71
72 def _wait(self):
72 def _wait(self):
73 """wait until some data are available on main or side
73 """wait until some data are available on main or side
74
74
75 return a pair of boolean (ismainready, issideready)
75 return a pair of boolean (ismainready, issideready)
76
76
77 (This will only wait for data if the setup is supported by `util.poll`)
77 (This will only wait for data if the setup is supported by `util.poll`)
78 """
78 """
79 if (
79 if (
80 isinstance(self._main, util.bufferedinputpipe)
80 isinstance(self._main, util.bufferedinputpipe)
81 and self._main.hasbuffer
81 and self._main.hasbuffer
82 ):
82 ):
83 # Main has data. Assume side is worth poking at.
83 # Main has data. Assume side is worth poking at.
84 return True, True
84 return True, True
85
85
86 fds = [self._main.fileno(), self._side.fileno()]
86 fds = [self._main.fileno(), self._side.fileno()]
87 try:
87 try:
88 act = util.poll(fds)
88 act = util.poll(fds)
89 except NotImplementedError:
89 except NotImplementedError:
90 # non supported yet case, assume all have data.
90 # non supported yet case, assume all have data.
91 act = fds
91 act = fds
92 return (self._main.fileno() in act, self._side.fileno() in act)
92 return (self._main.fileno() in act, self._side.fileno() in act)
93
93
94 def write(self, data):
94 def write(self, data):
95 return self._call(b'write', data)
95 return self._call(b'write', data)
96
96
97 def read(self, size):
97 def read(self, size):
98 r = self._call(b'read', size)
98 r = self._call(b'read', size)
99 if size != 0 and not r:
99 if size != 0 and not r:
100 # We've observed a condition that indicates the
100 # We've observed a condition that indicates the
101 # stdout closed unexpectedly. Check stderr one
101 # stdout closed unexpectedly. Check stderr one
102 # more time and snag anything that's there before
102 # more time and snag anything that's there before
103 # letting anyone know the main part of the pipe
103 # letting anyone know the main part of the pipe
104 # closed prematurely.
104 # closed prematurely.
105 _forwardoutput(self._ui, self._side)
105 _forwardoutput(self._ui, self._side)
106 return r
106 return r
107
107
108 def unbufferedread(self, size):
108 def unbufferedread(self, size):
109 r = self._call(b'unbufferedread', size)
109 r = self._call(b'unbufferedread', size)
110 if size != 0 and not r:
110 if size != 0 and not r:
111 # We've observed a condition that indicates the
111 # We've observed a condition that indicates the
112 # stdout closed unexpectedly. Check stderr one
112 # stdout closed unexpectedly. Check stderr one
113 # more time and snag anything that's there before
113 # more time and snag anything that's there before
114 # letting anyone know the main part of the pipe
114 # letting anyone know the main part of the pipe
115 # closed prematurely.
115 # closed prematurely.
116 _forwardoutput(self._ui, self._side)
116 _forwardoutput(self._ui, self._side)
117 return r
117 return r
118
118
119 def readline(self):
119 def readline(self):
120 return self._call(b'readline')
120 return self._call(b'readline')
121
121
122 def _call(self, methname, data=None):
122 def _call(self, methname, data=None):
123 """call <methname> on "main", forward output of "side" while blocking"""
123 """call <methname> on "main", forward output of "side" while blocking"""
124 # data can be '' or 0
124 # data can be '' or 0
125 if (data is not None and not data) or self._main.closed:
125 if (data is not None and not data) or self._main.closed:
126 _forwardoutput(self._ui, self._side)
126 _forwardoutput(self._ui, self._side)
127 return b''
127 return b''
128 while True:
128 while True:
129 mainready, sideready = self._wait()
129 mainready, sideready = self._wait()
130 if sideready:
130 if sideready:
131 _forwardoutput(self._ui, self._side)
131 _forwardoutput(self._ui, self._side)
132 if mainready:
132 if mainready:
133 meth = getattr(self._main, methname)
133 meth = getattr(self._main, methname)
134 if data is None:
134 if data is None:
135 return meth()
135 return meth()
136 else:
136 else:
137 return meth(data)
137 return meth(data)
138
138
139 def close(self):
139 def close(self):
140 return self._main.close()
140 return self._main.close()
141
141
142 @property
142 @property
143 def closed(self):
143 def closed(self):
144 return self._main.closed
144 return self._main.closed
145
145
146 def flush(self):
146 def flush(self):
147 return self._main.flush()
147 return self._main.flush()
148
148
149
149
150 def _cleanuppipes(ui, pipei, pipeo, pipee, warn):
150 def _cleanuppipes(ui, pipei, pipeo, pipee, warn):
151 """Clean up pipes used by an SSH connection."""
151 """Clean up pipes used by an SSH connection."""
152 didsomething = False
152 didsomething = False
153 if pipeo and not pipeo.closed:
153 if pipeo and not pipeo.closed:
154 didsomething = True
154 didsomething = True
155 pipeo.close()
155 pipeo.close()
156 if pipei and not pipei.closed:
156 if pipei and not pipei.closed:
157 didsomething = True
157 didsomething = True
158 pipei.close()
158 pipei.close()
159
159
160 if pipee and not pipee.closed:
160 if pipee and not pipee.closed:
161 didsomething = True
161 didsomething = True
162 # Try to read from the err descriptor until EOF.
162 # Try to read from the err descriptor until EOF.
163 try:
163 try:
164 for l in pipee:
164 for l in pipee:
165 ui.status(_(b'remote: '), l)
165 ui.status(_(b'remote: '), l)
166 except (IOError, ValueError):
166 except (IOError, ValueError):
167 pass
167 pass
168
168
169 pipee.close()
169 pipee.close()
170
170
171 if didsomething and warn is not None:
171 if didsomething and warn is not None:
172 # Encourage explicit close of sshpeers. Closing via __del__ is
172 # Encourage explicit close of sshpeers. Closing via __del__ is
173 # not very predictable when exceptions are thrown, which has led
173 # not very predictable when exceptions are thrown, which has led
174 # to deadlocks due to a peer get gc'ed in a fork
174 # to deadlocks due to a peer get gc'ed in a fork
175 # We add our own stack trace, because the stacktrace when called
175 # We add our own stack trace, because the stacktrace when called
176 # from __del__ is useless.
176 # from __del__ is useless.
177 ui.develwarn(b'missing close on SSH connection created at:\n%s' % warn)
177 ui.develwarn(b'missing close on SSH connection created at:\n%s' % warn)
178
178
179
179
180 def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
180 def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
181 """Create an SSH connection to a server.
181 """Create an SSH connection to a server.
182
182
183 Returns a tuple of (process, stdin, stdout, stderr) for the
183 Returns a tuple of (process, stdin, stdout, stderr) for the
184 spawned process.
184 spawned process.
185 """
185 """
186 cmd = b'%s %s %s' % (
186 cmd = b'%s %s %s' % (
187 sshcmd,
187 sshcmd,
188 args,
188 args,
189 procutil.shellquote(
189 procutil.shellquote(
190 b'%s -R %s serve --stdio'
190 b'%s -R %s serve --stdio'
191 % (_serverquote(remotecmd), _serverquote(path))
191 % (_serverquote(remotecmd), _serverquote(path))
192 ),
192 ),
193 )
193 )
194
194
195 ui.debug(b'running %s\n' % cmd)
195 ui.debug(b'running %s\n' % cmd)
196
196
197 # no buffer allow the use of 'select'
197 # no buffer allow the use of 'select'
198 # feel free to remove buffering and select usage when we ultimately
198 # feel free to remove buffering and select usage when we ultimately
199 # move to threading.
199 # move to threading.
200 stdin, stdout, stderr, proc = procutil.popen4(cmd, bufsize=0, env=sshenv)
200 stdin, stdout, stderr, proc = procutil.popen4(cmd, bufsize=0, env=sshenv)
201
201
202 return proc, stdin, stdout, stderr
202 return proc, stdin, stdout, stderr
203
203
204
204
205 def _clientcapabilities():
205 def _clientcapabilities():
206 """Return list of capabilities of this client.
206 """Return list of capabilities of this client.
207
207
208 Returns a list of capabilities that are supported by this client.
208 Returns a list of capabilities that are supported by this client.
209 """
209 """
210 protoparams = {b'partial-pull'}
210 protoparams = {b'partial-pull'}
211 comps = [
211 comps = [
212 e.wireprotosupport().name
212 e.wireprotosupport().name
213 for e in util.compengines.supportedwireengines(util.CLIENTROLE)
213 for e in util.compengines.supportedwireengines(util.CLIENTROLE)
214 ]
214 ]
215 protoparams.add(b'comp=%s' % b','.join(comps))
215 protoparams.add(b'comp=%s' % b','.join(comps))
216 return protoparams
216 return protoparams
217
217
218
218
def _performhandshake(ui, stdin, stdout, stderr):
    """Perform the version negotiation handshake with an SSH server.

    Writes the handshake commands to ``stdin``, scans ``stdout`` for the
    response, and returns a ``(protoname, caps)`` tuple where ``protoname``
    is the negotiated wire protocol name and ``caps`` is the set of
    capability tokens advertised by the remote.

    Raises ``error.RepoError`` when no recognizable response arrives.
    """

    def badresponse():
        # Flush any output on stderr. In general, the stderr contains errors
        # from the remote (ssh errors, some hg errors), and status indications
        # (like "adding changes"), with no current way to tell them apart.
        # Here we failed so early that it's almost certainly only errors, so
        # use warn=True so -q doesn't hide them.
        _forwardoutput(ui, stderr, warn=True)

        msg = _(b'no suitable response from remote hg')
        hint = ui.config(b'ui', b'ssherrorhint')
        raise error.RepoError(msg, hint=hint)

    # The handshake consists of sending wire protocol commands in reverse
    # order of protocol implementation and then sniffing for a response
    # to one of them.
    #
    # Those commands (from oldest to newest) are:
    #
    # ``between``
    #   Asks for the set of revisions between a pair of revisions. Command
    #   present in all Mercurial server implementations.
    #
    # ``hello``
    #   Instructs the server to advertise its capabilities. Introduced in
    #   Mercurial 0.9.1.
    #
    # ``upgrade``
    #   Requests upgrade from default transport protocol version 1 to
    #   a newer version. Introduced in Mercurial 4.6 as an experimental
    #   feature.
    #
    # The ``between`` command is issued with a request for the null
    # range. If the remote is a Mercurial server, this request will
    # generate a specific response: ``1\n\n``. This represents the
    # wire protocol encoded value for ``\n``. We look for ``1\n\n``
    # in the output stream and know this is the response to ``between``
    # and we're at the end of our handshake reply.
    #
    # The response to the ``hello`` command will be a line with the
    # length of the value returned by that command followed by that
    # value. If the server doesn't support ``hello`` (which should be
    # rare), that line will be ``0\n``. Otherwise, the value will contain
    # RFC 822 like lines. Of these, the ``capabilities:`` line contains
    # the capabilities of the server.
    #
    # The ``upgrade`` command isn't really a command in the traditional
    # sense of version 1 of the transport because it isn't using the
    # proper mechanism for formatting: instead, it just encodes
    # arguments on the line, delimited by spaces.
    #
    # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``.
    # If the server doesn't support protocol upgrades, it will reply to
    # this line with ``0\n``. Otherwise, it emits an
    # ``upgraded <token> <protocol>`` line to both stdout and stderr.
    # Content immediately following this line describes additional
    # protocol and server state.
    #
    # In addition to the responses to our command requests, the server
    # may emit "banner" output on stdout. SSH servers are allowed to
    # print messages to stdout on login. Issuing commands on connection
    # allows us to flush this banner output from the server by scanning
    # for output to our well-known ``between`` command. Of course, if
    # the banner contains ``1\n\n``, this will throw off our detection.

    requestlog = ui.configbool(b'devel', b'debug.peer-request')

    # Generate a random token to help identify responses to version 2
    # upgrade request.
    token = pycompat.sysbytes(str(uuid.uuid4()))

    try:
        pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40)
        handshake = [
            b'hello\n',
            b'between\n',
            b'pairs %d\n' % len(pairsarg),
            pairsarg,
        ]

        if requestlog:
            ui.debug(b'devel-peer-request: hello+between\n')
            ui.debug(b'devel-peer-request:   pairs: %d bytes\n' % len(pairsarg))
        ui.debug(b'sending hello command\n')
        ui.debug(b'sending between command\n')

        stdin.write(b''.join(handshake))
        stdin.flush()
    except IOError:
        badresponse()

    # Assume version 1 of wire protocol by default.
    protoname = wireprototypes.SSHV1
    reupgraded = re.compile(b'^upgraded %s (.*)$' % stringutil.reescape(token))

    # Seed ``lines`` so the loop condition (non-empty last line) holds on
    # the first iteration; ``max_noise`` bounds how much banner output we
    # are willing to scan before giving up.
    lines = [b'', b'dummy']
    max_noise = 500
    while lines[-1] and max_noise:
        try:
            l = stdout.readline()
            _forwardoutput(ui, stderr, warn=True)

            # Look for reply to protocol upgrade request. It has a token
            # in it, so there should be no false positives.
            m = reupgraded.match(l)
            if m:
                protoname = m.group(1)
                ui.debug(b'protocol upgraded to %s\n' % protoname)
                # If an upgrade was handled, the ``hello`` and ``between``
                # requests are ignored. The next output belongs to the
                # protocol, so stop scanning lines.
                break

            # Otherwise it could be a banner, ``0\n`` response if server
            # doesn't support upgrade.

            if lines[-1] == b'1\n' and l == b'\n':
                # End of the ``between`` reply: handshake complete.
                break
            if l:
                ui.debug(b'remote: ', l)
            lines.append(l)
            max_noise -= 1
        except IOError:
            badresponse()
    else:
        # Loop exhausted without finding a handshake reply.
        badresponse()

    caps = set()

    # For version 1, we should see a ``capabilities`` line in response to the
    # ``hello`` command.
    if protoname == wireprototypes.SSHV1:
        for l in reversed(lines):
            # Look for response to ``hello`` command. Scan from the back so
            # we don't misinterpret banner output as the command reply.
            if l.startswith(b'capabilities:'):
                caps.update(l[:-1].split(b':')[1].split())
                break

        # Error if we couldn't find capabilities, this means:
        #
        # 1. Remote isn't a Mercurial server
        # 2. Remote is a <0.9.1 Mercurial server
        # 3. Remote is a future Mercurial server that dropped ``hello``
        #    and other attempted handshake mechanisms.
        if not caps:
            badresponse()

    # Flush any output on stderr before proceeding.
    _forwardoutput(ui, stderr, warn=True)

    return protoname, caps
371
371
372
372
class sshv1peer(wireprotov1peer.wirepeer):
    """A peer speaking version 1 of the SSH wire protocol over stdio pipes."""

    def __init__(
        self, ui, url, proc, stdin, stdout, stderr, caps, autoreadstderr=True
    ):
        """Create a peer from an existing SSH connection.

        ``proc`` is a handle on the underlying SSH process.
        ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio
        pipes for that process.
        ``caps`` is a set of capabilities supported by the remote.
        ``autoreadstderr`` denotes whether to automatically read from
        stderr and to forward its output.
        """
        super().__init__(ui)
        self._url = url
        # self._subprocess is unused. Keeping a handle on the process
        # holds a reference and prevents it from being garbage collected.
        self._subprocess = proc

        # And we hook up our "doublepipe" wrapper to allow querying
        # stderr any time we perform I/O.
        if autoreadstderr:
            stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr)
            stdin = doublepipe(ui, stdin, stderr)

        self._pipeo = stdin
        self._pipei = stdout
        self._pipee = stderr
        self._caps = caps
        self._autoreadstderr = autoreadstderr
        # Captured creation stack, passed to _cleanup from __del__ so a
        # leaked (never explicitly closed) peer can be traced to its origin.
        self._initstack = b''.join(util.getstackframes(1))

    # Commands that have a "framed" response where the first line of the
    # response contains the length of that response.
    _FRAMED_COMMANDS = {
        b'batch',
    }

    # Begin of ipeerconnection interface.

    def url(self):
        return self._url

    def local(self):
        # A remote SSH peer never has a local repository.
        return None

    def canpush(self):
        return True

    def close(self):
        self._cleanup()

    # End of ipeerconnection interface.

    # Begin of ipeercommands interface.

    def capabilities(self):
        return self._caps

    # End of ipeercommands interface.

    def _readerr(self):
        # Drain and forward any pending output on the remote's stderr.
        _forwardoutput(self.ui, self._pipee)

    def _abort(self, exception):
        # Tear down the pipes before propagating the error to the caller.
        self._cleanup()
        raise exception

    def _cleanup(self, warn=None):
        _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee, warn=warn)

    def __del__(self):
        self._cleanup(warn=self._initstack)

    def _sendrequest(self, cmd, args, framed=False):
        """Send ``cmd`` with ``args`` over the wire and return a reader.

        When ``framed`` is true, the response is prefixed by its length and
        a capped reader for exactly that many bytes is returned; otherwise
        the raw input pipe is returned.
        """
        if self.ui.debugflag and self.ui.configbool(
            b'devel', b'debug.peer-request'
        ):
            dbg = self.ui.debug
            line = b'devel-peer-request: %s\n'
            dbg(line % cmd)
            for key, value in sorted(args.items()):
                if not isinstance(value, dict):
                    dbg(line % b'  %s: %d bytes' % (key, len(value)))
                else:
                    for dk, dv in sorted(value.items()):
                        dbg(line % b'  %s-%s: %d' % (key, dk, len(dv)))
        self.ui.debug(b"sending %s command\n" % cmd)
        self._pipeo.write(b"%s\n" % cmd)
        # Serialize arguments in the order declared by the server-side
        # command table; ``*`` soaks up all remaining arguments.
        _func, names = wireprotov1server.commands[cmd]
        keys = names.split()
        wireargs = {}
        for k in keys:
            if k == b'*':
                wireargs[b'*'] = args
                break
            else:
                wireargs[k] = args[k]
                del args[k]
        for k, v in sorted(wireargs.items()):
            self._pipeo.write(b"%s %d\n" % (k, len(v)))
            if isinstance(v, dict):
                for dk, dv in v.items():
                    self._pipeo.write(b"%s %d\n" % (dk, len(dv)))
                    self._pipeo.write(dv)
            else:
                self._pipeo.write(v)
        self._pipeo.flush()

        # We know exactly how many bytes are in the response. So return a proxy
        # around the raw output stream that allows reading exactly this many
        # bytes. Callers then can read() without fear of overrunning the
        # response.
        if framed:
            amount = self._getamount()
            return util.cappedreader(self._pipei, amount)

        return self._pipei

    def _callstream(self, cmd, **args):
        args = pycompat.byteskwargs(args)
        return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)

    def _callcompressable(self, cmd, **args):
        args = pycompat.byteskwargs(args)
        return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)

    def _call(self, cmd, **args):
        # Framed request whose full response body is read eagerly.
        args = pycompat.byteskwargs(args)
        return self._sendrequest(cmd, args, framed=True).read()

    def _callpush(self, cmd, fp, **args):
        """Issue ``cmd``, stream the payload from ``fp``, return (ret, err)."""
        # The server responds with an empty frame if the client should
        # continue submitting the payload.
        r = self._call(cmd, **args)
        if r:
            return b'', r

        # The payload consists of frames with content followed by an empty
        # frame.
        for d in iter(lambda: fp.read(4096), b''):
            self._writeframed(d)
        self._writeframed(b"", flush=True)

        # In case of success, there is an empty frame and a frame containing
        # the integer result (as a string).
        # In case of error, there is a non-empty frame containing the error.
        r = self._readframed()
        if r:
            return b'', r
        return self._readframed(), b''

    def _calltwowaystream(self, cmd, fp, **args):
        """Issue ``cmd``, stream the payload, return the response pipe."""
        # The server responds with an empty frame if the client should
        # continue submitting the payload.
        r = self._call(cmd, **args)
        if r:
            # XXX needs to be made better
            raise error.Abort(_(b'unexpected remote reply: %s') % r)

        # The payload consists of frames with content followed by an empty
        # frame.
        for d in iter(lambda: fp.read(4096), b''):
            self._writeframed(d)
        self._writeframed(b"", flush=True)

        return self._pipei

    def _getamount(self):
        """Read and return the integer length prefix of a framed response."""
        l = self._pipei.readline()
        if l == b'\n':
            # An empty line here signals an out-of-band error from the
            # remote; its details were (hopefully) written to stderr.
            if self._autoreadstderr:
                self._readerr()
            msg = _(b'check previous remote output')
            self._abort(error.OutOfBandError(hint=msg))
        if self._autoreadstderr:
            self._readerr()
        try:
            return int(l)
        except ValueError:
            self._abort(error.ResponseError(_(b"unexpected response:"), l))

    def _readframed(self):
        """Read one length-prefixed frame; b'' for an empty frame."""
        size = self._getamount()
        if not size:
            return b''

        return self._pipei.read(size)

    def _writeframed(self, data, flush=False):
        """Write one length-prefixed frame, optionally flushing the pipe."""
        self._pipeo.write(b"%d\n" % len(data))
        if data:
            self._pipeo.write(data)
        if flush:
            self._pipeo.flush()
        if self._autoreadstderr:
            self._readerr()
570
570
571
571
def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True):
    """Make a peer instance from existing pipes.

    ``path`` and ``proc`` are stored on the eventual peer instance and may
    not be used for anything meaningful.

    ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the
    SSH server's stdio handles.

    This function is factored out to allow creating peers that don't
    actually spawn a new process. It is useful for starting SSH protocol
    servers and clients via non-standard means, which can be useful for
    testing.
    """
    try:
        protoname, caps = _performhandshake(ui, stdin, stdout, stderr)
    except Exception:
        # Handshake went sideways; make sure the pipes don't leak before
        # letting the original error propagate.
        _cleanuppipes(ui, stdout, stdin, stderr, warn=None)
        raise

    # Only protocol version 1 is understood here; anything else is fatal.
    if protoname != wireprototypes.SSHV1:
        _cleanuppipes(ui, stdout, stdin, stderr, warn=None)
        raise error.RepoError(
            _(b'unknown version of SSH protocol: %s') % protoname
        )

    return sshv1peer(
        ui,
        path,
        proc,
        stdin,
        stdout,
        stderr,
        caps,
        autoreadstderr=autoreadstderr,
    )
608
608
609
609
def make_peer(ui, path, create, intents=None, createopts=None):
    """Create an SSH peer.

    The returned object conforms to the ``wireprotov1peer.wirepeer`` interface.
    """
    # ``path`` arrives as a path object; the raw location string is what the
    # URL machinery below operates on.
    path = path.loc
    u = urlutil.url(path, parsequery=False, parsefragment=False)
    if u.scheme != b'ssh' or not u.host or u.path is None:
        raise error.RepoError(_(b"couldn't parse location %s") % path)

    urlutil.checksafessh(path)

    if u.passwd is not None:
        raise error.RepoError(_(b'password in URL not supported'))

    # Gather the pieces needed to spawn the remote command over SSH.
    sshcmd = ui.config(b'ui', b'ssh')
    remotecmd = ui.config(b'ui', b'remotecmd')
    sshaddenv = dict(ui.configitems(b'sshenv'))
    sshenv = procutil.shellenviron(sshaddenv)
    remotepath = u.path or b'.'

    args = procutil.sshargs(sshcmd, u.host, u.user, u.port)

    if create:
        # We /could/ do this, but only if the remote init command knows how to
        # handle them. We don't yet make any assumptions about that. And without
        # querying the remote, there's no way of knowing if the remote even
        # supports said requested feature.
        if createopts:
            raise error.RepoError(
                _(
                    b'cannot create remote SSH repositories '
                    b'with extra options'
                )
            )

        initcmd = b'%s init %s' % (
            _serverquote(remotecmd),
            _serverquote(remotepath),
        )
        cmd = b'%s %s %s' % (sshcmd, args, procutil.shellquote(initcmd))
        ui.debug(b'running %s\n' % cmd)
        res = ui.system(cmd, blockedtag=b'sshpeer', environ=sshenv)
        if res != 0:
            raise error.RepoError(_(b'could not create remote repo'))

    proc, stdin, stdout, stderr = _makeconnection(
        ui, sshcmd, args, remotecmd, remotepath, sshenv
    )

    peer = makepeer(ui, path, proc, stdin, stdout, stderr)

    # Finally, if supported by the server, notify it about our own
    # capabilities.
    if b'protocaps' in peer.capabilities():
        try:
            peer._call(
                b"protocaps", caps=b' '.join(sorted(_clientcapabilities()))
            )
        except IOError:
            peer._cleanup()
            raise error.RepoError(_(b'capability exchange failed'))

    return peer
@@ -1,265 +1,266 b''
1 # statichttprepo.py - simple http repository class for mercurial
1 # statichttprepo.py - simple http repository class for mercurial
2 #
2 #
3 # This provides read-only repo access to repositories exported via static http
3 # This provides read-only repo access to repositories exported via static http
4 #
4 #
5 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10
10
11 import errno
11 import errno
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import sha1nodeconstants
14 from .node import sha1nodeconstants
15 from . import (
15 from . import (
16 branchmap,
16 branchmap,
17 changelog,
17 changelog,
18 error,
18 error,
19 localrepo,
19 localrepo,
20 manifest,
20 manifest,
21 namespaces,
21 namespaces,
22 pathutil,
22 pathutil,
23 pycompat,
23 pycompat,
24 requirements as requirementsmod,
24 requirements as requirementsmod,
25 url,
25 url,
26 util,
26 util,
27 vfs as vfsmod,
27 vfs as vfsmod,
28 )
28 )
29 from .utils import (
29 from .utils import (
30 urlutil,
30 urlutil,
31 )
31 )
32
32
33 urlerr = util.urlerr
33 urlerr = util.urlerr
34 urlreq = util.urlreq
34 urlreq = util.urlreq
35
35
36
36
class httprangereader:
    """File-like reader that fetches byte ranges of a URL over HTTP.

    The supplied ``opener`` is expected to have an HTTPRangeHandler
    installed so that ``Range`` requests yield 206 responses; when the
    server ignores the header and replies 200, the full entity is
    sliced locally instead.
    """

    def __init__(self, url, opener):
        # ``name`` mirrors ``url`` so this object quacks like a file.
        self.url = url
        self.pos = 0
        self.opener = opener
        self.name = url

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def seek(self, pos):
        # No I/O happens here; the position is applied on the next read().
        self.pos = pos

    def read(self, bytes=None):
        request = urlreq.request(pycompat.strurl(self.url))
        end = self.pos + bytes - 1 if bytes else b''
        if self.pos or end:
            request.add_header('Range', 'bytes=%d-%s' % (self.pos, end))

        try:
            resp = self.opener.open(request)
            payload = resp.read()
            status = resp.code
        except urlerr.httperror as err:
            errnum = errno.ENOENT if err.code == 404 else None
            # Explicitly convert the exception to str as Py3 will try
            # convert it to local encoding and with as the HTTPResponse
            # instance doesn't support encode.
            raise IOError(errnum, str(err))
        except urlerr.urlerror as err:
            raise IOError(None, err.reason)

        if status == 200:
            # HTTPRangeHandler does nothing if remote does not support
            # Range headers and returns the full entity. Let's slice it.
            if bytes:
                payload = payload[self.pos : self.pos + bytes]
            else:
                payload = payload[self.pos :]
        elif bytes:
            payload = payload[:bytes]
        self.pos += len(payload)
        return payload

    def readlines(self):
        return self.read().splitlines(True)

    def __iter__(self):
        return iter(self.readlines())

    def close(self):
        # Nothing to release: each read() opens and drops its own response.
        pass
95
95
96
96
97 # _RangeError and _HTTPRangeHandler were originally in byterange.py,
97 # _RangeError and _HTTPRangeHandler were originally in byterange.py,
98 # which was itself extracted from urlgrabber. See the last version of
98 # which was itself extracted from urlgrabber. See the last version of
99 # byterange.py from history if you need more information.
99 # byterange.py from history if you need more information.
100 class _RangeError(IOError):
100 class _RangeError(IOError):
101 """Error raised when an unsatisfiable range is requested."""
101 """Error raised when an unsatisfiable range is requested."""
102
102
103
103
class _HTTPRangeHandler(urlreq.basehandler):
    """urllib handler that makes HTTP Range responses non-fatal.

    The Range header is an ordinary HTTP feature, so all this class
    does is tell the opener that a "206 Partial Content" reply is the
    expected, successful outcome of a range request rather than an
    error condition.
    """

    def http_error_206(self, req, fp, code, msg, hdrs):
        # 206 Partial Content: wrap the response in the standard
        # addinfourl object and hand it back as a success.
        result = urlreq.addinfourl(fp, hdrs, req.get_full_url())
        result.code = code
        result.msg = msg
        return result

    def http_error_416(self, req, fp, code, msg, hdrs):
        # 416 Range Not Satisfiable: surface it as a dedicated error.
        raise _RangeError(b'Requested Range Not Satisfiable')
123
123
124
124
def build_opener(ui, authinfo):
    """Return a vfs *class* whose instances read files over HTTP.

    ``authinfo`` carries the credentials separately because urllib
    cannot handle URLs with embedded user or password.  The returned
    class closes over a shared opener that understands Range requests.
    """
    opener = url.opener(ui, authinfo)
    opener.add_handler(_HTTPRangeHandler())

    class statichttpvfs(vfsmod.abstractvfs):
        def __init__(self, base):
            self.base = base
            self.options = {}

        def __call__(self, path, mode=b'r', *args, **kw):
            # The transport is read-only; refuse anything but reads.
            if mode != b'r' and mode != b'rb':
                raise IOError(b'Permission denied')
            remote = b"/".join((self.base, urlreq.quote(path)))
            return httprangereader(remote, opener)

        def join(self, path):
            return pathutil.join(self.base, path) if path else self.base

    return statichttpvfs
148
148
149
149
class statichttppeer(localrepo.localpeer):
    """Peer for a static-http repository: remote-like and read-only."""

    def local(self):
        # Although backed by localrepo machinery, a static-http repo is
        # reached over HTTP, so it never counts as a local repository.
        return None

    def canpush(self):
        # The transport cannot accept writes.
        return False
156
156
157
157
class statichttprepository(
    localrepo.localrepository, localrepo.revlogfilestorage
):
    """Read-only repository served as plain files over static HTTP.

    Bypasses ``localrepo.localrepository.__init__`` entirely and wires
    up just enough state (vfs, store, changelog, manifest, caches) for
    read operations.  Locking and cache writing are disabled.
    """

    # Same requirement set a regular local repository supports.
    supported = localrepo.localrepository._basesupported

    def __init__(self, ui, path):
        # NOTE(review): ``path`` here is the bare URL string/bytes, not a
        # urlutil path object — callers (make_peer) unwrap it first.
        self._url = path
        self.ui = ui

        self.root = path
        u = urlutil.url(path.rstrip(b'/') + b"/.hg")
        # authinfo is split out because urllib cannot handle credentials
        # embedded in the URL (see build_opener).
        self.path, authinfo = u.authinfo()

        vfsclass = build_opener(ui, authinfo)
        self.vfs = vfsclass(self.path)
        self.cachevfs = vfsclass(self.vfs.join(b'cache'))
        self._phasedefaults = []

        self.names = namespaces.namespaces()
        self.filtername = None
        self._extrafilterid = None
        self._wanted_sidedata = set()
        self.features = set()

        # A missing 'requires' file means either an old-style repo or
        # not a repo at all; disambiguated just below.
        try:
            requirements = set(self.vfs.read(b'requires').splitlines())
        except FileNotFoundError:
            requirements = set()

        # check if it is a non-empty old-style repository
        try:
            fp = self.vfs(b"00changelog.i")
            fp.read(1)
            fp.close()
        except FileNotFoundError:
            # we do not care about empty old-style repositories here
            msg = _(b"'%s' does not appear to be an hg repository") % path
            raise error.RepoError(msg)
        # Share-safe repos keep additional requirements under .hg/store.
        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            storevfs = vfsclass(self.vfs.join(b'store'))
            requirements |= set(storevfs.read(b'requires').splitlines())

        supportedrequirements = localrepo.gathersupportedrequirements(ui)
        localrepo.ensurerequirementsrecognized(
            requirements, supportedrequirements
        )
        localrepo.ensurerequirementscompatible(ui, requirements)
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid

        # setup store
        self.store = localrepo.makestore(requirements, self.path, vfsclass)
        self.spath = self.store.path
        self.svfs = self.store.opener
        self.sjoin = self.store.join
        self._filecache = {}
        self.requirements = requirements

        # Revlog-backed manifest/changelog over the store vfs; must come
        # after the store is set up since both read through self.svfs.
        rootmanifest = manifest.manifestrevlog(self.nodeconstants, self.svfs)
        self.manifestlog = manifest.manifestlog(
            self.svfs, self, rootmanifest, self.narrowmatch()
        )
        self.changelog = changelog.changelog(self.svfs)
        self._tags = None
        self.nodetagscache = None
        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self.encodepats = None
        self.decodepats = None
        self._transref = None

    def _restrictcapabilities(self, caps):
        # Read-only transport: advertise everything but pushkey.
        caps = super(statichttprepository, self)._restrictcapabilities(caps)
        return caps.difference([b"pushkey"])

    def url(self):
        return self._url

    def local(self):
        return False

    def peer(self, path=None):
        return statichttppeer(self, path=path)

    def wlock(self, wait=True):
        # Working-directory lock is meaningless over static HTTP.
        raise error.LockUnavailable(
            0,
            _(b'lock not available'),
            b'lock',
            _(b'cannot lock static-http repository'),
        )

    def lock(self, wait=True):
        # Store lock is equally unavailable on a read-only transport.
        raise error.LockUnavailable(
            0,
            _(b'lock not available'),
            b'lock',
            _(b'cannot lock static-http repository'),
        )

    def _writecaches(self):
        pass # statichttprepository are read only
260
260
261
261
def make_peer(ui, path, create, intents=None, createopts=None):
    """Build a peer for the static-http repository described by ``path``.

    ``path`` is a path object whose ``loc`` attribute holds the
    ``static-http://...`` URL.  Creation is always rejected because the
    transport is read-only.
    """
    if create:
        raise error.Abort(_(b'cannot create new static-http repository'))
    # Drop the leading b'static-' (7 bytes) so the repository class sees
    # a plain http(s) URL.
    location = path.loc
    return statichttprepository(ui, location[7:]).peer()
General Comments 0
You need to be logged in to leave comments. Login now