##// END OF EJS Templates
peer: dissolve `_peerlookup` into its last two callers...
marmoute -
r50644:be3fcd9e default
parent child Browse files
Show More
@@ -1,159 +1,167 b''
1 # Copyright 2009, Alexander Solovyov <piranha@piranha.org.ua>
1 # Copyright 2009, Alexander Solovyov <piranha@piranha.org.ua>
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 """extend schemes with shortcuts to repository swarms
6 """extend schemes with shortcuts to repository swarms
7
7
8 This extension allows you to specify shortcuts for parent URLs with a
8 This extension allows you to specify shortcuts for parent URLs with a
9 lot of repositories to act like a scheme, for example::
9 lot of repositories to act like a scheme, for example::
10
10
11 [schemes]
11 [schemes]
12 py = http://code.python.org/hg/
12 py = http://code.python.org/hg/
13
13
14 After that you can use it like::
14 After that you can use it like::
15
15
16 hg clone py://trunk/
16 hg clone py://trunk/
17
17
18 Additionally there is support for some more complex schemas, for
18 Additionally there is support for some more complex schemas, for
19 example used by Google Code::
19 example used by Google Code::
20
20
21 [schemes]
21 [schemes]
22 gcode = http://{1}.googlecode.com/hg/
22 gcode = http://{1}.googlecode.com/hg/
23
23
24 The syntax is taken from Mercurial templates, and you have unlimited
24 The syntax is taken from Mercurial templates, and you have unlimited
25 number of variables, starting with ``{1}`` and continuing with
25 number of variables, starting with ``{1}`` and continuing with
26 ``{2}``, ``{3}`` and so on. This variables will receive parts of URL
26 ``{2}``, ``{3}`` and so on. This variables will receive parts of URL
27 supplied, split by ``/``. Anything not specified as ``{part}`` will be
27 supplied, split by ``/``. Anything not specified as ``{part}`` will be
28 just appended to an URL.
28 just appended to an URL.
29
29
30 For convenience, the extension adds these schemes by default::
30 For convenience, the extension adds these schemes by default::
31
31
32 [schemes]
32 [schemes]
33 py = http://hg.python.org/
33 py = http://hg.python.org/
34 bb = https://bitbucket.org/
34 bb = https://bitbucket.org/
35 bb+ssh = ssh://hg@bitbucket.org/
35 bb+ssh = ssh://hg@bitbucket.org/
36 gcode = https://{1}.googlecode.com/hg/
36 gcode = https://{1}.googlecode.com/hg/
37 kiln = https://{1}.kilnhg.com/Repo/
37 kiln = https://{1}.kilnhg.com/Repo/
38
38
39 You can override a predefined scheme by defining a new scheme with the
39 You can override a predefined scheme by defining a new scheme with the
40 same name.
40 same name.
41 """
41 """
42
42
43 import os
43 import os
44 import re
44 import re
45
45
46 from mercurial.i18n import _
46 from mercurial.i18n import _
47 from mercurial import (
47 from mercurial import (
48 error,
48 error,
49 extensions,
49 extensions,
50 hg,
50 hg,
51 pycompat,
51 pycompat,
52 registrar,
52 registrar,
53 templater,
53 templater,
54 )
54 )
55 from mercurial.utils import (
55 from mercurial.utils import (
56 urlutil,
56 urlutil,
57 )
57 )
58
58
cmdtable = {}
command = registrar.command(cmdtable)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core'
# for extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

# matches positional template placeholders such as b'{1}', b'{2}', ...
_partre = re.compile(br'{(\d+)\}')
68
68
69
69
class ShortRepository:
    """Factory that expands a configured scheme shorthand and then
    delegates repository/peer creation to the factory that handles the
    expanded URL's real scheme."""

    # positional placeholders ({1}, {2}, ...) accepted in URL templates;
    # kept on the class so instances do not depend on module-level state
    _placeholder_re = re.compile(br'{(\d+)\}')

    def __init__(self, url, scheme, templater):
        self.scheme = scheme
        self.templater = templater
        self.url = url
        try:
            # number of URL components consumed by the template: the
            # highest placeholder index it mentions; max() raises
            # ValueError when there is no placeholder at all
            self.parts = max(map(int, self._placeholder_re.findall(self.url)))
        except ValueError:
            self.parts = 0

    def __repr__(self):
        # __repr__ must return str on Python 3; the scheme is bytes, so
        # decode it for display. (Returning bytes here made repr() raise
        # TypeError.)
        return '<ShortRepository: %s>' % self.scheme.decode('utf-8', 'replace')

    def instance(self, ui, url, create, intents=None, createopts=None):
        """Open a repository or peer for ``url`` after expanding it."""
        url = self.resolve(url)
        u = urlutil.url(url)
        scheme = u.scheme or b'file'
        # dispatch on the *expanded* URL's scheme, peers first
        if scheme in hg.peer_schemes:
            cls = hg.peer_schemes[scheme]
        elif scheme in hg.repo_schemes:
            cls = hg.repo_schemes[scheme]
        else:
            cls = hg.LocalFactory
        return cls.instance(
            ui, url, create, intents=intents, createopts=createopts
        )

    def resolve(self, url):
        """Expand ``scheme://...`` into the full URL using the template."""
        # Should this use the urlutil.url class, or is manual parsing better?
        try:
            url = url.split(b'://', 1)[1]
        except IndexError:
            raise error.Abort(_(b"no '://' in scheme url '%s'") % url)
        # the first self.parts path components feed the placeholders;
        # whatever remains is appended verbatim
        parts = url.split(b'/', self.parts)
        if len(parts) > self.parts:
            tail = parts[-1]
            parts = parts[:-1]
        else:
            tail = b''
        context = {b'%d' % (i + 1): v for i, v in enumerate(parts)}
        return b''.join(self.templater.process(self.url, context)) + tail
103
111
104
112
def hasdriveletter(orig, path):
    """Wrapped ``urlutil.hasdriveletter``: a path beginning with one of
    our registered scheme prefixes is never treated as a drive letter."""
    if path:
        prefixes = [s + b':' for s in schemes]
        if any(path.startswith(p) for p in prefixes):
            return False
    return orig(path)
111
119
112
120
# built-in shorthands; user configuration from the [schemes] section is
# merged on top of these in extsetup(), so any entry can be overridden
schemes = {
    b'py': b'http://hg.python.org/',
    b'bb': b'https://bitbucket.org/',
    b'bb+ssh': b'ssh://hg@bitbucket.org/',
    b'gcode': b'https://{1}.googlecode.com/hg/',
    b'kiln': b'https://{1}.kilnhg.com/Repo/',
}
120
128
121
129
def _check_drive_letter(scheme):
    """Abort if ``scheme`` would collide with an existing Windows drive
    letter (e.g. a single-letter scheme ``c`` when ``C:\\`` exists)."""
    if not pycompat.iswindows:
        return
    if len(scheme) != 1 or not scheme.isalpha():
        return
    if not os.path.exists(b'%s:\\' % scheme):
        return
    msg = _(b'custom scheme %s:// conflicts with drive letter %s:\\\n')
    msg %= (scheme, scheme.upper())
    raise error.Abort(msg)
133
141
134
142
def extsetup(ui):
    """Register every configured shorthand as a repo/peer factory and
    wrap the drive-letter detection so shorthands are not mistaken for
    Windows drive letters."""
    schemes.update(dict(ui.configitems(b'schemes')))
    t = templater.engine(templater.parse)
    for scheme, url in schemes.items():
        # validate the individual scheme name, not the whole mapping:
        # passing `schemes` (the dict) here made the drive-letter check
        # a silent no-op, since len(dict) is never 1
        _check_drive_letter(scheme)
        # register next to whichever factory family handles the target
        url_scheme = urlutil.url(url).scheme
        if url_scheme in hg.peer_schemes:
            hg.peer_schemes[scheme] = ShortRepository(url, scheme, t)
        else:
            hg.repo_schemes[scheme] = ShortRepository(url, scheme, t)

    extensions.wrapfunction(urlutil, b'hasdriveletter', hasdriveletter)
147
155
148
156
@command(b'debugexpandscheme', norepo=True)
def expandscheme(ui, url, **opts):
    """given a repo path, provide the scheme-expanded path"""
    scheme = urlutil.url(url).scheme
    # look in the peer registry first, then the repo registry
    factory = hg.peer_schemes.get(scheme)
    if factory is None:
        factory = hg.repo_schemes.get(scheme)
    # only our own shorthand factories know how to expand a URL
    if isinstance(factory, ShortRepository):
        url = factory.resolve(url)
    ui.write(url + b'\n')
@@ -1,1667 +1,1664 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
# re-export of lock.release for callers of this module
release = lock.release

# shared features
sharedbookmarks = b'bookmarks'
66
66
67
67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against a remote into concrete revisions.

    ``lrepo`` is the local repository (used only to resolve the b'.'
    branch via its dirstate), ``other`` is a peer or a localrepo,
    ``branches`` is a ``(hashbranch, branches)`` pair, and ``revs`` is an
    optional list of revisions.

    Returns a ``(revs, checkout)`` pair; ``checkout`` may be None.
    """
    if util.safehasattr(other, 'peer'):
        # a courtesy to callers using a localrepo for other
        peer = other.peer()
    else:
        peer = other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related was requested: pass revs through
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # remote cannot enumerate branches; the lone hashbranch is
        # treated as a plain revision instead
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # append the heads of ``branch`` to revs; False when the branch
        # is unknown to the remote
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            # heads are extended newest-first (reversed branchmap order)
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # hashbranch may be a branch name or a raw revision; fall back
        # to treating it as a revision when the branch lookup fails
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
114
114
115
115
def _isfile(path):
    """Return True if ``path`` names a regular file.

    os.stat() is used directly instead of os.path.isfile() because the
    latter started returning False on invalid-path exceptions in
    Python 3.8, and invalid paths should be reported to the user here.
    """
    try:
        mode = os.stat(path).st_mode
    except ValueError as e:
        # invalid path: surface it as an explicit user-facing error
        msg = stringutil.forcebytestr(e)
        raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
    except OSError:
        return False
    return stat.S_ISREG(mode)
130
130
131
131
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a regular file is a bundle, which is not a local repository
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        expanded = util.expandpath(urlutil.urllocalpath(path))
        module = bundlerepo if _isfile(expanded) else localrepo
        return module.instance(ui, expanded, *args, **kwargs)
148
148
149
149
# url scheme -> factory for opening a *local* repository object
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# url scheme -> module implementing the wire peer for that protocol
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
162
162
163
163
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # a repository object: ask it directly (passing an object here
        # is deprecated in favour of obj.local())
        repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
        return repo.local()
    # a path: dispatch on its scheme, peers first
    scheme = urlutil.url(repo).scheme or b'file'
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
    elif scheme in repo_schemes:
        cls = repo_schemes[scheme]
    else:
        cls = LocalFactory
    cls.instance  # make sure we load the module
    if util.safehasattr(cls, 'islocal'):
        return cls.islocal(repo)  # pytype: disable=module-attr
    return False
184
181
185
182
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
193
190
194
191
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
197
194
198
195
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run pre-setup callbacks and extension reposetup hooks on ``obj``."""
    # prefer the object's own ui when it carries one
    ui = getattr(obj, "ui", ui)
    if presetupfuncs:
        for fn in presetupfuncs:
            fn(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for extname, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', extname)
            reposetup = getattr(module, 'reposetup', None)
            if reposetup:
                with util.timedcm('reposetup %r', extname) as stats:
                    reposetup(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, extname, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    # wire peers additionally get the registered peer-setup functions
    if not obj.local():
        for fn in wirepeersetupfuncs:
            fn(ui, obj)
217
214
218
215
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    try:
        cls = repo_schemes[scheme]
    except KeyError:
        # a wire-protocol scheme cannot yield a local repository object
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        cls = LocalFactory
    repo = cls.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    return repo.filtered(b'visible')
245
242
246
243
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    if util.safehasattr(path, 'url'):
        # this is a urlutil.path object
        scheme = path.url.scheme  # pytype: disable=attribute-error
        # XXX for now we don't do anything more than that
        path = path.loc  # pytype: disable=attribute-error
    else:
        scheme = urlutil.url(path).scheme
    if scheme not in peer_schemes:
        # no wire protocol for this scheme: open it as a local
        # repository and hand back its peer facade
        repo = repository(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        return repo.peer()
    cls = peer_schemes[scheme]
    new_peer = cls.instance(
        rui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(rui, new_peer)
    return new_peer
278
275
279
276
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # last path component, ignoring any trailing separator
    return os.path.basename(os.path.normpath(path)) if path else b''
300
297
301
298
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # the store lives in the repo itself: not a share
        return None

    # reuse the cached source repository when one was already opened
    cached = util.safehasattr(repo, b'srcrepo') and repo.srcrepo
    if cached:
        return cached

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    result = repository(repo.ui, srcurl)
    repo.srcrepo = result
    return result
319
316
320
317
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    # sharing only makes sense for local repositories; source may be a
    # repo/peer object or a raw path (bytes)
    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        # path form: open the source repo and resolve any branch part of
        # the URL into the revision to check out afterwards
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    # create the destination as a share of srcrepo
    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # NOTE(review): the destination is reopened here, presumably so the
    # freshly written share configuration takes effect — confirm
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
373
370
374
371
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was performed using the
    share-safe method, where the config of the source is shared."""
    source_vfs = vfsmod.vfs(repo.sharedpath)
    local_vfs = vfsmod.vfs(repo.path)

    if not source_vfs.exists(b'hgrc'):
        # nothing to copy over from the share source
        return

    existing = local_vfs.read(b'hgrc') if local_vfs.exists(b'hgrc') else b''

    with local_vfs(b'hgrc', b'wb') as fp:
        source_config = source_vfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(source_config)
        fp.write(b'\n')
        fp.write(existing)
395
392
396
393
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around instead of deleting it outright
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            # drop the share-related requirements and persist the change
            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make sure stale references to the old object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
443
440
444
441
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    # Record where future pulls/pushes should go: an explicit default path
    # wins, otherwise fall back to the source repo's configured default.
    paths_default = defaultpath
    if not paths_default:
        paths_default = sourcerepo.ui.config(b'paths', b'default')
    if paths_default:
        hgrc_body = b'[paths]\ndefault = %s\n' % paths_default
        destrepo.vfs.write(b'hgrc', util.tonativeeol(hgrc_body))
    # Narrow clones keep a working-copy-side copy of the narrow spec; mirror
    # it into the new share under the wlock.
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
461
458
462
459
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    # A non-boolean ``update`` names the revision to check out.
    if update is not True:
        checkout = update
    # Try the requested revision first, then the conventional fallbacks.
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
483
480
484
481
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # hardlink starts as None ("unknown"); util.copyfiles decides and
        # reports back whether hardlinking worked. The progress topic is
        # therefore always "copying" on the first pass.
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            copied = 0
            publishing = srcrepo.publishing()
            src_vfs = vfsmod.vfs(srcrepo.sharedpath)
            dst_vfs = vfsmod.vfs(destpath)
            for name in srcrepo.store.copylist():
                # Publishing repos have no secret changesets; skip phase data.
                if publishing and name.endswith(b'phaseroots'):
                    continue
                parent = os.path.dirname(name)
                if parent and not dst_vfs.exists(parent):
                    dst_vfs.mkdir(parent)
                if not src_vfs.exists(name):
                    continue
                if name.endswith(b'data'):
                    # 'parent' may be empty (e.g. revlog format 0)
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dst_vfs, os.path.join(parent, b"lock"))
                hardlink, n = util.copyfiles(
                    src_vfs.join(name), dst_vfs.join(name), hardlink, progress
                )
                copied += n
            if hardlink:
                ui.debug(b"linked %d files\n" % copied)
            else:
                ui.debug(b"copied %d files\n" % copied)
        return destlock
    except:  # re-raises
        # Drop the half-acquired destination lock before propagating.
        release(destlock)
        raise
523
520
524
521
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # Resolve each requested revision to a node on the remote.
        # TODO this is batchable.
        resolved = []
        for symbol in rev:
            with srcpeer.commandexecutor() as executor:
                node = executor.callcommand(
                    b'lookup',
                    {
                        b'key': symbol,
                    },
                ).result()
            resolved.append(node)
        revs = resolved

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    poolname = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % poolname):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % poolname
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % poolname
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
630
627
631
628
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src = srcrepo.cachevfs.join(fname)
    if not os.path.exists(src):
        # Nothing cached on the source side; nothing to copy.
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
641
638
642
639
643 def clone(
640 def clone(
644 ui,
641 ui,
645 peeropts,
642 peeropts,
646 source,
643 source,
647 dest=None,
644 dest=None,
648 pull=False,
645 pull=False,
649 revs=None,
646 revs=None,
650 update=True,
647 update=True,
651 stream=False,
648 stream=False,
652 branch=None,
649 branch=None,
653 shareopts=None,
650 shareopts=None,
654 storeincludepats=None,
651 storeincludepats=None,
655 storeexcludepats=None,
652 storeexcludepats=None,
656 depth=None,
653 depth=None,
657 ):
654 ):
658 """Make a copy of an existing repository.
655 """Make a copy of an existing repository.
659
656
660 Create a copy of an existing repository in a new directory. The
657 Create a copy of an existing repository in a new directory. The
661 source and destination are URLs, as passed to the repository
658 source and destination are URLs, as passed to the repository
662 function. Returns a pair of repository peers, the source and
659 function. Returns a pair of repository peers, the source and
663 newly created destination.
660 newly created destination.
664
661
665 The location of the source is added to the new repository's
662 The location of the source is added to the new repository's
666 .hg/hgrc file, as the default to be used for future pulls and
663 .hg/hgrc file, as the default to be used for future pulls and
667 pushes.
664 pushes.
668
665
669 If an exception is raised, the partly cloned/updated destination
666 If an exception is raised, the partly cloned/updated destination
670 repository will be deleted.
667 repository will be deleted.
671
668
672 Arguments:
669 Arguments:
673
670
674 source: repository object or URL
671 source: repository object or URL
675
672
676 dest: URL of destination repository to create (defaults to base
673 dest: URL of destination repository to create (defaults to base
677 name of source repository)
674 name of source repository)
678
675
679 pull: always pull from source repository, even in local case or if the
676 pull: always pull from source repository, even in local case or if the
680 server prefers streaming
677 server prefers streaming
681
678
682 stream: stream raw data uncompressed from repository (fast over
679 stream: stream raw data uncompressed from repository (fast over
683 LAN, slow over WAN)
680 LAN, slow over WAN)
684
681
685 revs: revision to clone up to (implies pull=True)
682 revs: revision to clone up to (implies pull=True)
686
683
687 update: update working directory after clone completes, if
684 update: update working directory after clone completes, if
688 destination is local repository (True means update to default rev,
685 destination is local repository (True means update to default rev,
689 anything else is treated as a revision)
686 anything else is treated as a revision)
690
687
691 branch: branches to clone
688 branch: branches to clone
692
689
693 shareopts: dict of options to control auto sharing behavior. The "pool" key
690 shareopts: dict of options to control auto sharing behavior. The "pool" key
694 activates auto sharing mode and defines the directory for stores. The
691 activates auto sharing mode and defines the directory for stores. The
695 "mode" key determines how to construct the directory name of the shared
692 "mode" key determines how to construct the directory name of the shared
696 repository. "identity" means the name is derived from the node of the first
693 repository. "identity" means the name is derived from the node of the first
697 changeset in the repository. "remote" means the name is derived from the
694 changeset in the repository. "remote" means the name is derived from the
698 remote's path/URL. Defaults to "identity."
695 remote's path/URL. Defaults to "identity."
699
696
700 storeincludepats and storeexcludepats: sets of file patterns to include and
697 storeincludepats and storeexcludepats: sets of file patterns to include and
701 exclude in the repository copy, respectively. If not defined, all files
698 exclude in the repository copy, respectively. If not defined, all files
702 will be included (a "full" clone). Otherwise a "narrow" clone containing
699 will be included (a "full" clone). Otherwise a "narrow" clone containing
703 only the requested files will be performed. If ``storeincludepats`` is not
700 only the requested files will be performed. If ``storeincludepats`` is not
704 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
701 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
705 ``path:.``. If both are empty sets, no files will be cloned.
702 ``path:.``. If both are empty sets, no files will be cloned.
706 """
703 """
707
704
708 if isinstance(source, bytes):
705 if isinstance(source, bytes):
709 src_path = urlutil.get_clone_path_obj(ui, source)
706 src_path = urlutil.get_clone_path_obj(ui, source)
710 if src_path is None:
707 if src_path is None:
711 srcpeer = peer(ui, peeropts, b'')
708 srcpeer = peer(ui, peeropts, b'')
712 origsource = source = b''
709 origsource = source = b''
713 branches = (None, branch or [])
710 branches = (None, branch or [])
714 else:
711 else:
715 srcpeer = peer(ui, peeropts, src_path)
712 srcpeer = peer(ui, peeropts, src_path)
716 origsource = src_path.rawloc
713 origsource = src_path.rawloc
717 branches = (src_path.branch, branch or [])
714 branches = (src_path.branch, branch or [])
718 source = src_path.loc
715 source = src_path.loc
719 else:
716 else:
720 if util.safehasattr(source, 'peer'):
717 if util.safehasattr(source, 'peer'):
721 srcpeer = source.peer() # in case we were called with a localrepo
718 srcpeer = source.peer() # in case we were called with a localrepo
722 else:
719 else:
723 srcpeer = source
720 srcpeer = source
724 branches = (None, branch or [])
721 branches = (None, branch or [])
725 # XXX path: simply use the peer `path` object when this become available
722 # XXX path: simply use the peer `path` object when this become available
726 origsource = source = srcpeer.url()
723 origsource = source = srcpeer.url()
727 srclock = destlock = destwlock = cleandir = None
724 srclock = destlock = destwlock = cleandir = None
728 destpeer = None
725 destpeer = None
729 try:
726 try:
730 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
727 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
731
728
732 if dest is None:
729 if dest is None:
733 dest = defaultdest(source)
730 dest = defaultdest(source)
734 if dest:
731 if dest:
735 ui.status(_(b"destination directory: %s\n") % dest)
732 ui.status(_(b"destination directory: %s\n") % dest)
736 else:
733 else:
737 dest_path = urlutil.get_clone_path_obj(ui, dest)
734 dest_path = urlutil.get_clone_path_obj(ui, dest)
738 if dest_path is not None:
735 if dest_path is not None:
739 dest = dest_path.rawloc
736 dest = dest_path.rawloc
740 else:
737 else:
741 dest = b''
738 dest = b''
742
739
743 dest = urlutil.urllocalpath(dest)
740 dest = urlutil.urllocalpath(dest)
744 source = urlutil.urllocalpath(source)
741 source = urlutil.urllocalpath(source)
745
742
746 if not dest:
743 if not dest:
747 raise error.InputError(_(b"empty destination path is not valid"))
744 raise error.InputError(_(b"empty destination path is not valid"))
748
745
749 destvfs = vfsmod.vfs(dest, expandpath=True)
746 destvfs = vfsmod.vfs(dest, expandpath=True)
750 if destvfs.lexists():
747 if destvfs.lexists():
751 if not destvfs.isdir():
748 if not destvfs.isdir():
752 raise error.InputError(
749 raise error.InputError(
753 _(b"destination '%s' already exists") % dest
750 _(b"destination '%s' already exists") % dest
754 )
751 )
755 elif destvfs.listdir():
752 elif destvfs.listdir():
756 raise error.InputError(
753 raise error.InputError(
757 _(b"destination '%s' is not empty") % dest
754 _(b"destination '%s' is not empty") % dest
758 )
755 )
759
756
760 createopts = {}
757 createopts = {}
761 narrow = False
758 narrow = False
762
759
763 if storeincludepats is not None:
760 if storeincludepats is not None:
764 narrowspec.validatepatterns(storeincludepats)
761 narrowspec.validatepatterns(storeincludepats)
765 narrow = True
762 narrow = True
766
763
767 if storeexcludepats is not None:
764 if storeexcludepats is not None:
768 narrowspec.validatepatterns(storeexcludepats)
765 narrowspec.validatepatterns(storeexcludepats)
769 narrow = True
766 narrow = True
770
767
771 if narrow:
768 if narrow:
772 # Include everything by default if only exclusion patterns defined.
769 # Include everything by default if only exclusion patterns defined.
773 if storeexcludepats and not storeincludepats:
770 if storeexcludepats and not storeincludepats:
774 storeincludepats = {b'path:.'}
771 storeincludepats = {b'path:.'}
775
772
776 createopts[b'narrowfiles'] = True
773 createopts[b'narrowfiles'] = True
777
774
778 if depth:
775 if depth:
779 createopts[b'shallowfilestore'] = True
776 createopts[b'shallowfilestore'] = True
780
777
781 if srcpeer.capable(b'lfs-serve'):
778 if srcpeer.capable(b'lfs-serve'):
782 # Repository creation honors the config if it disabled the extension, so
779 # Repository creation honors the config if it disabled the extension, so
783 # we can't just announce that lfs will be enabled. This check avoids
780 # we can't just announce that lfs will be enabled. This check avoids
784 # saying that lfs will be enabled, and then saying it's an unknown
781 # saying that lfs will be enabled, and then saying it's an unknown
785 # feature. The lfs creation option is set in either case so that a
782 # feature. The lfs creation option is set in either case so that a
786 # requirement is added. If the extension is explicitly disabled but the
783 # requirement is added. If the extension is explicitly disabled but the
787 # requirement is set, the clone aborts early, before transferring any
784 # requirement is set, the clone aborts early, before transferring any
788 # data.
785 # data.
789 createopts[b'lfs'] = True
786 createopts[b'lfs'] = True
790
787
791 if extensions.disabled_help(b'lfs'):
788 if extensions.disabled_help(b'lfs'):
792 ui.status(
789 ui.status(
793 _(
790 _(
794 b'(remote is using large file support (lfs), but it is '
791 b'(remote is using large file support (lfs), but it is '
795 b'explicitly disabled in the local configuration)\n'
792 b'explicitly disabled in the local configuration)\n'
796 )
793 )
797 )
794 )
798 else:
795 else:
799 ui.status(
796 ui.status(
800 _(
797 _(
801 b'(remote is using large file support (lfs); lfs will '
798 b'(remote is using large file support (lfs); lfs will '
802 b'be enabled for this repository)\n'
799 b'be enabled for this repository)\n'
803 )
800 )
804 )
801 )
805
802
806 shareopts = shareopts or {}
803 shareopts = shareopts or {}
807 sharepool = shareopts.get(b'pool')
804 sharepool = shareopts.get(b'pool')
808 sharenamemode = shareopts.get(b'mode')
805 sharenamemode = shareopts.get(b'mode')
809 if sharepool and islocal(dest):
806 if sharepool and islocal(dest):
810 sharepath = None
807 sharepath = None
811 if sharenamemode == b'identity':
808 if sharenamemode == b'identity':
812 # Resolve the name from the initial changeset in the remote
809 # Resolve the name from the initial changeset in the remote
813 # repository. This returns nullid when the remote is empty. It
810 # repository. This returns nullid when the remote is empty. It
814 # raises RepoLookupError if revision 0 is filtered or otherwise
811 # raises RepoLookupError if revision 0 is filtered or otherwise
815 # not available. If we fail to resolve, sharing is not enabled.
812 # not available. If we fail to resolve, sharing is not enabled.
816 try:
813 try:
817 with srcpeer.commandexecutor() as e:
814 with srcpeer.commandexecutor() as e:
818 rootnode = e.callcommand(
815 rootnode = e.callcommand(
819 b'lookup',
816 b'lookup',
820 {
817 {
821 b'key': b'0',
818 b'key': b'0',
822 },
819 },
823 ).result()
820 ).result()
824
821
825 if rootnode != sha1nodeconstants.nullid:
822 if rootnode != sha1nodeconstants.nullid:
826 sharepath = os.path.join(sharepool, hex(rootnode))
823 sharepath = os.path.join(sharepool, hex(rootnode))
827 else:
824 else:
828 ui.status(
825 ui.status(
829 _(
826 _(
830 b'(not using pooled storage: '
827 b'(not using pooled storage: '
831 b'remote appears to be empty)\n'
828 b'remote appears to be empty)\n'
832 )
829 )
833 )
830 )
834 except error.RepoLookupError:
831 except error.RepoLookupError:
835 ui.status(
832 ui.status(
836 _(
833 _(
837 b'(not using pooled storage: '
834 b'(not using pooled storage: '
838 b'unable to resolve identity of remote)\n'
835 b'unable to resolve identity of remote)\n'
839 )
836 )
840 )
837 )
841 elif sharenamemode == b'remote':
838 elif sharenamemode == b'remote':
842 sharepath = os.path.join(
839 sharepath = os.path.join(
843 sharepool, hex(hashutil.sha1(source).digest())
840 sharepool, hex(hashutil.sha1(source).digest())
844 )
841 )
845 else:
842 else:
846 raise error.Abort(
843 raise error.Abort(
847 _(b'unknown share naming mode: %s') % sharenamemode
844 _(b'unknown share naming mode: %s') % sharenamemode
848 )
845 )
849
846
850 # TODO this is a somewhat arbitrary restriction.
847 # TODO this is a somewhat arbitrary restriction.
851 if narrow:
848 if narrow:
852 ui.status(
849 ui.status(
853 _(b'(pooled storage not supported for narrow clones)\n')
850 _(b'(pooled storage not supported for narrow clones)\n')
854 )
851 )
855 sharepath = None
852 sharepath = None
856
853
857 if sharepath:
854 if sharepath:
858 return clonewithshare(
855 return clonewithshare(
859 ui,
856 ui,
860 peeropts,
857 peeropts,
861 sharepath,
858 sharepath,
862 source,
859 source,
863 srcpeer,
860 srcpeer,
864 dest,
861 dest,
865 pull=pull,
862 pull=pull,
866 rev=revs,
863 rev=revs,
867 update=update,
864 update=update,
868 stream=stream,
865 stream=stream,
869 )
866 )
870
867
871 srcrepo = srcpeer.local()
868 srcrepo = srcpeer.local()
872
869
873 abspath = origsource
870 abspath = origsource
874 if islocal(origsource):
871 if islocal(origsource):
875 abspath = util.abspath(urlutil.urllocalpath(origsource))
872 abspath = util.abspath(urlutil.urllocalpath(origsource))
876
873
877 if islocal(dest):
874 if islocal(dest):
878 if os.path.exists(dest):
875 if os.path.exists(dest):
879 # only clean up directories we create ourselves
876 # only clean up directories we create ourselves
880 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
877 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
881 cleandir = hgdir
878 cleandir = hgdir
882 else:
879 else:
883 cleandir = dest
880 cleandir = dest
884
881
885 copy = False
882 copy = False
886 if (
883 if (
887 srcrepo
884 srcrepo
888 and srcrepo.cancopy()
885 and srcrepo.cancopy()
889 and islocal(dest)
886 and islocal(dest)
890 and not phases.hassecret(srcrepo)
887 and not phases.hassecret(srcrepo)
891 ):
888 ):
892 copy = not pull and not revs
889 copy = not pull and not revs
893
890
894 # TODO this is a somewhat arbitrary restriction.
891 # TODO this is a somewhat arbitrary restriction.
895 if narrow:
892 if narrow:
896 copy = False
893 copy = False
897
894
898 if copy:
895 if copy:
899 try:
896 try:
900 # we use a lock here because if we race with commit, we
897 # we use a lock here because if we race with commit, we
901 # can end up with extra data in the cloned revlogs that's
898 # can end up with extra data in the cloned revlogs that's
902 # not pointed to by changesets, thus causing verify to
899 # not pointed to by changesets, thus causing verify to
903 # fail
900 # fail
904 srclock = srcrepo.lock(wait=False)
901 srclock = srcrepo.lock(wait=False)
905 except error.LockError:
902 except error.LockError:
906 copy = False
903 copy = False
907
904
908 if copy:
905 if copy:
909 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
906 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
910
907
911 destrootpath = urlutil.urllocalpath(dest)
908 destrootpath = urlutil.urllocalpath(dest)
912 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
909 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
913 localrepo.createrepository(
910 localrepo.createrepository(
914 ui,
911 ui,
915 destrootpath,
912 destrootpath,
916 requirements=dest_reqs,
913 requirements=dest_reqs,
917 )
914 )
918 destrepo = localrepo.makelocalrepository(ui, destrootpath)
915 destrepo = localrepo.makelocalrepository(ui, destrootpath)
919
916
920 destwlock = destrepo.wlock()
917 destwlock = destrepo.wlock()
921 destlock = destrepo.lock()
918 destlock = destrepo.lock()
922 from . import streamclone # avoid cycle
919 from . import streamclone # avoid cycle
923
920
924 streamclone.local_copy(srcrepo, destrepo)
921 streamclone.local_copy(srcrepo, destrepo)
925
922
926 # we need to re-init the repo after manually copying the data
923 # we need to re-init the repo after manually copying the data
927 # into it
924 # into it
928 destpeer = peer(srcrepo, peeropts, dest)
925 destpeer = peer(srcrepo, peeropts, dest)
929
926
930 # make the peer aware that is it already locked
927 # make the peer aware that is it already locked
931 #
928 #
932 # important:
929 # important:
933 #
930 #
934 # We still need to release that lock at the end of the function
931 # We still need to release that lock at the end of the function
935 destpeer.local()._lockref = weakref.ref(destlock)
932 destpeer.local()._lockref = weakref.ref(destlock)
936 destpeer.local()._wlockref = weakref.ref(destwlock)
933 destpeer.local()._wlockref = weakref.ref(destwlock)
937 # dirstate also needs to be copied because `_wlockref` has a reference
934 # dirstate also needs to be copied because `_wlockref` has a reference
938 # to it: this dirstate is saved to disk when the wlock is released
935 # to it: this dirstate is saved to disk when the wlock is released
939 destpeer.local().dirstate = destrepo.dirstate
936 destpeer.local().dirstate = destrepo.dirstate
940
937
941 srcrepo.hook(
938 srcrepo.hook(
942 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
939 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
943 )
940 )
944 else:
941 else:
945 try:
942 try:
946 # only pass ui when no srcrepo
943 # only pass ui when no srcrepo
947 destpeer = peer(
944 destpeer = peer(
948 srcrepo or ui,
945 srcrepo or ui,
949 peeropts,
946 peeropts,
950 dest,
947 dest,
951 create=True,
948 create=True,
952 createopts=createopts,
949 createopts=createopts,
953 )
950 )
954 except FileExistsError:
951 except FileExistsError:
955 cleandir = None
952 cleandir = None
956 raise error.Abort(_(b"destination '%s' already exists") % dest)
953 raise error.Abort(_(b"destination '%s' already exists") % dest)
957
954
958 if revs:
955 if revs:
959 if not srcpeer.capable(b'lookup'):
956 if not srcpeer.capable(b'lookup'):
960 raise error.Abort(
957 raise error.Abort(
961 _(
958 _(
962 b"src repository does not support "
959 b"src repository does not support "
963 b"revision lookup and so doesn't "
960 b"revision lookup and so doesn't "
964 b"support clone by revision"
961 b"support clone by revision"
965 )
962 )
966 )
963 )
967
964
968 # TODO this is batchable.
965 # TODO this is batchable.
969 remoterevs = []
966 remoterevs = []
970 for rev in revs:
967 for rev in revs:
971 with srcpeer.commandexecutor() as e:
968 with srcpeer.commandexecutor() as e:
972 remoterevs.append(
969 remoterevs.append(
973 e.callcommand(
970 e.callcommand(
974 b'lookup',
971 b'lookup',
975 {
972 {
976 b'key': rev,
973 b'key': rev,
977 },
974 },
978 ).result()
975 ).result()
979 )
976 )
980 revs = remoterevs
977 revs = remoterevs
981
978
982 checkout = revs[0]
979 checkout = revs[0]
983 else:
980 else:
984 revs = None
981 revs = None
985 local = destpeer.local()
982 local = destpeer.local()
986 if local:
983 if local:
987 if narrow:
984 if narrow:
988 with local.wlock(), local.lock():
985 with local.wlock(), local.lock():
989 local.setnarrowpats(storeincludepats, storeexcludepats)
986 local.setnarrowpats(storeincludepats, storeexcludepats)
990 narrowspec.copytoworkingcopy(local)
987 narrowspec.copytoworkingcopy(local)
991
988
992 u = urlutil.url(abspath)
989 u = urlutil.url(abspath)
993 defaulturl = bytes(u)
990 defaulturl = bytes(u)
994 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
991 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
995 if not stream:
992 if not stream:
996 if pull:
993 if pull:
997 stream = False
994 stream = False
998 else:
995 else:
999 stream = None
996 stream = None
1000 # internal config: ui.quietbookmarkmove
997 # internal config: ui.quietbookmarkmove
1001 overrides = {(b'ui', b'quietbookmarkmove'): True}
998 overrides = {(b'ui', b'quietbookmarkmove'): True}
1002 with local.ui.configoverride(overrides, b'clone'):
999 with local.ui.configoverride(overrides, b'clone'):
1003 exchange.pull(
1000 exchange.pull(
1004 local,
1001 local,
1005 srcpeer,
1002 srcpeer,
1006 heads=revs,
1003 heads=revs,
1007 streamclonerequested=stream,
1004 streamclonerequested=stream,
1008 includepats=storeincludepats,
1005 includepats=storeincludepats,
1009 excludepats=storeexcludepats,
1006 excludepats=storeexcludepats,
1010 depth=depth,
1007 depth=depth,
1011 )
1008 )
1012 elif srcrepo:
1009 elif srcrepo:
1013 # TODO lift restriction once exchange.push() accepts narrow
1010 # TODO lift restriction once exchange.push() accepts narrow
1014 # push.
1011 # push.
1015 if narrow:
1012 if narrow:
1016 raise error.Abort(
1013 raise error.Abort(
1017 _(
1014 _(
1018 b'narrow clone not available for '
1015 b'narrow clone not available for '
1019 b'remote destinations'
1016 b'remote destinations'
1020 )
1017 )
1021 )
1018 )
1022
1019
1023 exchange.push(
1020 exchange.push(
1024 srcrepo,
1021 srcrepo,
1025 destpeer,
1022 destpeer,
1026 revs=revs,
1023 revs=revs,
1027 bookmarks=srcrepo._bookmarks.keys(),
1024 bookmarks=srcrepo._bookmarks.keys(),
1028 )
1025 )
1029 else:
1026 else:
1030 raise error.Abort(
1027 raise error.Abort(
1031 _(b"clone from remote to remote not supported")
1028 _(b"clone from remote to remote not supported")
1032 )
1029 )
1033
1030
1034 cleandir = None
1031 cleandir = None
1035
1032
1036 destrepo = destpeer.local()
1033 destrepo = destpeer.local()
1037 if destrepo:
1034 if destrepo:
1038 template = uimod.samplehgrcs[b'cloned']
1035 template = uimod.samplehgrcs[b'cloned']
1039 u = urlutil.url(abspath)
1036 u = urlutil.url(abspath)
1040 u.passwd = None
1037 u.passwd = None
1041 defaulturl = bytes(u)
1038 defaulturl = bytes(u)
1042 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1039 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1043 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1040 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1044
1041
1045 if ui.configbool(b'experimental', b'remotenames'):
1042 if ui.configbool(b'experimental', b'remotenames'):
1046 logexchange.pullremotenames(destrepo, srcpeer)
1043 logexchange.pullremotenames(destrepo, srcpeer)
1047
1044
1048 if update:
1045 if update:
1049 if update is not True:
1046 if update is not True:
1050 with srcpeer.commandexecutor() as e:
1047 with srcpeer.commandexecutor() as e:
1051 checkout = e.callcommand(
1048 checkout = e.callcommand(
1052 b'lookup',
1049 b'lookup',
1053 {
1050 {
1054 b'key': update,
1051 b'key': update,
1055 },
1052 },
1056 ).result()
1053 ).result()
1057
1054
1058 uprev = None
1055 uprev = None
1059 status = None
1056 status = None
1060 if checkout is not None:
1057 if checkout is not None:
1061 # Some extensions (at least hg-git and hg-subversion) have
1058 # Some extensions (at least hg-git and hg-subversion) have
1062 # a peer.lookup() implementation that returns a name instead
1059 # a peer.lookup() implementation that returns a name instead
1063 # of a nodeid. We work around it here until we've figured
1060 # of a nodeid. We work around it here until we've figured
1064 # out a better solution.
1061 # out a better solution.
1065 if len(checkout) == 20 and checkout in destrepo:
1062 if len(checkout) == 20 and checkout in destrepo:
1066 uprev = checkout
1063 uprev = checkout
1067 elif scmutil.isrevsymbol(destrepo, checkout):
1064 elif scmutil.isrevsymbol(destrepo, checkout):
1068 uprev = scmutil.revsymbol(destrepo, checkout).node()
1065 uprev = scmutil.revsymbol(destrepo, checkout).node()
1069 else:
1066 else:
1070 if update is not True:
1067 if update is not True:
1071 try:
1068 try:
1072 uprev = destrepo.lookup(update)
1069 uprev = destrepo.lookup(update)
1073 except error.RepoLookupError:
1070 except error.RepoLookupError:
1074 pass
1071 pass
1075 if uprev is None:
1072 if uprev is None:
1076 try:
1073 try:
1077 if destrepo._activebookmark:
1074 if destrepo._activebookmark:
1078 uprev = destrepo.lookup(destrepo._activebookmark)
1075 uprev = destrepo.lookup(destrepo._activebookmark)
1079 update = destrepo._activebookmark
1076 update = destrepo._activebookmark
1080 else:
1077 else:
1081 uprev = destrepo._bookmarks[b'@']
1078 uprev = destrepo._bookmarks[b'@']
1082 update = b'@'
1079 update = b'@'
1083 bn = destrepo[uprev].branch()
1080 bn = destrepo[uprev].branch()
1084 if bn == b'default':
1081 if bn == b'default':
1085 status = _(b"updating to bookmark %s\n" % update)
1082 status = _(b"updating to bookmark %s\n" % update)
1086 else:
1083 else:
1087 status = (
1084 status = (
1088 _(b"updating to bookmark %s on branch %s\n")
1085 _(b"updating to bookmark %s on branch %s\n")
1089 ) % (update, bn)
1086 ) % (update, bn)
1090 except KeyError:
1087 except KeyError:
1091 try:
1088 try:
1092 uprev = destrepo.branchtip(b'default')
1089 uprev = destrepo.branchtip(b'default')
1093 except error.RepoLookupError:
1090 except error.RepoLookupError:
1094 uprev = destrepo.lookup(b'tip')
1091 uprev = destrepo.lookup(b'tip')
1095 if not status:
1092 if not status:
1096 bn = destrepo[uprev].branch()
1093 bn = destrepo[uprev].branch()
1097 status = _(b"updating to branch %s\n") % bn
1094 status = _(b"updating to branch %s\n") % bn
1098 destrepo.ui.status(status)
1095 destrepo.ui.status(status)
1099 _update(destrepo, uprev)
1096 _update(destrepo, uprev)
1100 if update in destrepo._bookmarks:
1097 if update in destrepo._bookmarks:
1101 bookmarks.activate(destrepo, update)
1098 bookmarks.activate(destrepo, update)
1102 if destlock is not None:
1099 if destlock is not None:
1103 release(destlock)
1100 release(destlock)
1104 if destwlock is not None:
1101 if destwlock is not None:
1105 release(destlock)
1102 release(destlock)
1106 # here is a tiny windows were someone could end up writing the
1103 # here is a tiny windows were someone could end up writing the
1107 # repository before the cache are sure to be warm. This is "fine"
1104 # repository before the cache are sure to be warm. This is "fine"
1108 # as the only "bad" outcome would be some slowness. That potential
1105 # as the only "bad" outcome would be some slowness. That potential
1109 # slowness already affect reader.
1106 # slowness already affect reader.
1110 with destrepo.lock():
1107 with destrepo.lock():
1111 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1108 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1112 finally:
1109 finally:
1113 release(srclock, destlock, destwlock)
1110 release(srclock, destlock, destwlock)
1114 if cleandir is not None:
1111 if cleandir is not None:
1115 shutil.rmtree(cleandir, True)
1112 shutil.rmtree(cleandir, True)
1116 if srcpeer is not None:
1113 if srcpeer is not None:
1117 srcpeer.close()
1114 srcpeer.close()
1118 if destpeer and destpeer.local() is None:
1115 if destpeer and destpeer.local() is None:
1119 destpeer.close()
1116 destpeer.close()
1120 return srcpeer, destpeer
1117 return srcpeer, destpeer
1121
1118
1122
1119
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge *stats* on the repo's ui.

    When *quietempty* is true and *stats* records no changes at all,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    summary = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(summary % counts)
1138
1135
1139
1136
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # deprecated entry point: kept for compatibility, forwards to merge
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    stats = mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=[b'working copy', b'destination'],
        updatecheck=updatecheck,
    )
    return stats
1158
1155
1159
1156
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to *node*; return True on conflicts."""
    target = repo[node]
    stats = mergemod.update(target, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1167
1164
1168
1165
# naming conflict in clone(): a local variable there shadows the name
# ``update``, so keep the module-level function reachable as ``_update``
_update = update
1171
1168
1172
1169
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to *node*, clobbering changes.

    Always returns False: an overwriting update cannot leave conflicts.
    """
    target = repo[node]
    stats = mergemod.clean_update(target)
    # a clean (overwriting) update must never produce unresolved merges
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False
1180
1177
1181
1178
# naming conflict in updatetotally(): its ``clean`` parameter shadows the
# module-level function, which is therefore aliased as ``_clean``
_clean = clean
1184
1181
# the set of values accepted for updatetotally()'s ``updatecheck``
# argument and the ``commands.update.check`` config knob
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1191
1188
1192
1189
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit target: let destutil pick the destination (and
            # a possible bookmark to move / activate)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out up front on a dirty working directory, then
                # proceed with an unchecked update
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            # update succeeded: advance (or leave) the active bookmark
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # the requested name is a bookmark: make it active
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # requested name is not a bookmark: deactivate any active one
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1271
1268
1272
1269
def merge(ctx, force=False, remind=True, labels=None):
    """Branch merge with *ctx*, resolving changes.

    Returns True when any file merges remain unresolved.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        hint = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(hint)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return has_conflicts
1294
1291
1295
1292
def abortmerge(ui, repo):
    """Abandon an in-progress merge, updating back to the first parent."""
    ms = mergestatemod.mergestate.read(repo)
    # with conflicts, the mergestate records the local side to go back to;
    # without conflicts no mergestate was stored, so fall back to '.'
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean (overwriting) update must never produce unresolved merges
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1309
1306
1310
1307
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns ``subreporecurse()``'s result when no changes are found,
    0 otherwise.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            peer_path = url = bytes(subpath)
        else:
            # resolve the relative subrepo path against the source URL,
            # using the path-separator convention of the URL's scheme
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    # cleanupfn may be replaced below once getremotechanges takes over
    # the peer (e.g. wrapping it in a bundle repo)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1377
1374
1378
1375
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from *source*.

    Returns 0 when incoming changes are found, 1 otherwise; may recurse
    into subrepositories via the ``subrepos`` option.
    """

    def subreporecurse():
        # recurse into subrepositories; the combined exit code is the
        # minimum, i.e. success if any subrepo reported incoming changes
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # honor --limit / --newest-first / --no-merges while displaying
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1408
1405
1409
1406
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the nodes missing from every destination in *dests*.

    Returns ``(outgoing_revs, others)``: the union of missing nodes
    sorted by local revision number, and the list of opened peers —
    the caller is responsible for closing each of them.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                # resolve the relative subrepo path against the
                # destination URL, per the URL scheme's path convention
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # peer is kept open and handed to the caller for later hooks
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1452
1449
1453
1450
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on each subrepository when the ``subrepos`` option is set.

    Returns the minimum of the subrepo exit codes (1 when none ran).
    """
    ret = 1
    if not opts.get(b'subrepos'):
        return ret
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        subret = wctx.sub(subpath).outgoing(ui, dests, opts)
        ret = min(ret, subret)
    return ret
1462
1459
1463
1460
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing

    Generator yielding the filtered nodes. Note that ``newest_first``
    reverses *revs* in place before iteration.
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: pass every node straight through
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        if no_merges:
            parents = [p for p in cl.parents(node) if p != repo.nullid]
            if len(parents) == 2:
                # two real parents: this is a merge, skip it
                continue
        emitted += 1
        yield node
1485
1482
1486
1483
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the given destination(s).

    Returns 0 when outgoing changes are found, 1 otherwise; may recurse
    into subrepositories via the ``subrepos`` option.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

        if opts.get(b'graph'):
            # graph display ignores --limit/--no-merges style filtering
            revdag = logcmdutil.graphrevs(repo, o, opts)
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(
                ui, repo, opts, buffered=True
            )
            logcmdutil.displaygraph(
                ui, repo, revdag, displayer, graphmod.asciiedges
            )
        else:
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            for n in _outgoing_filter(repo, o, opts):
                displayer.show(repo[n])
            displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # _outgoing() leaves its peers open; close them all here
        for oth in others:
            oth.close()
1519
1516
def verify(repo, level=None):
    """Verify the consistency of a repository.

    Runs the core verifier first, then walks every revision touching
    ``.hgsubstate`` and verifies the referenced subrepositories too.
    Returns a non-zero status if any check reported a problem.
    """
    status = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    substate_revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )
    if not substate_revs:
        return status

    repo.ui.status(_(b'checking subrepo links\n'))
    for rev in substate_revs:
        ctx = repo[rev]
        try:
            for subpath in ctx.substate:
                try:
                    subresult = ctx.sub(subpath, allowcreate=False).verify()
                    # keep the first failure sticky across subrepos
                    status = subresult or status
                except error.RepoError as inst:
                    repo.ui.warn(b'%d: %s\n' % (rev, inst))
        except Exception:
            repo.ui.warn(
                _(b'.hgsubstate is corrupt in revision %s\n')
                % short(ctx.node())
            )

    return status
1552
1549
1553
1550
def remoteui(src, opts):
    """Build a ui for talking to a remote from a ui-or-repo and options.

    ``src`` may be a repository (its ``baseui`` is copied, dropping
    repo-specific config) or a plain ui object.  Only settings relevant
    to contacting a remote are carried over; command-line options in
    ``opts`` take precedence over configuration.
    """
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        remote = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        remote = src.copy()  # keep all global options

    # ssh-specific options: explicit flags win over configuration
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            remote.setconfig(b"ui", opt, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        remote.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # copy selected local settings to the remote ui
    for section in (
        b'auth',
        b'hostfingerprints',
        b'hostsecurity',
        b'http_proxy',
    ):
        for key, val in src.configitems(section):
            remote.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        remote.setconfig(
            b'web', b'cacerts', util.expandpath(cacerts), b'copied'
        )

    return remote
1582
1579
1583
1580
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.  Each entry is (repo attribute naming a base directory,
# filename relative to it); see cachedlocalrepo._repostate for how the
# fingerprint is built from them.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1593
1590
1594
1591
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint (per-file mtime/size tuples) and newest mtime seen,
        # used by fetch() to detect on-disk changes
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            # fingerprint unchanged: reuse the cached instance
            return self._repo, False

        # repo changed on disk: open a fresh instance and re-apply the
        # original view filter (or lack thereof)
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return (fingerprint, newest mtime) for the files of interest.

        The fingerprint is a tuple of (mtime, size) pairs, one per entry
        in the module-level ``foi`` list.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file absent (e.g. no bookmarks yet): fall back to the
                # containing directory so the entry still contributes
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # carry over the cached fingerprint so the copy doesn't treat the
        # current on-disk state as a change
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now