##// END OF EJS Templates
peer-or-repo: split the scheme between repo and peer...
marmoute -
r50584:f73f02ef default
parent child Browse files
Show More
@@ -1,150 +1,158
# Copyright 2009, Alexander Solovyov <piranha@piranha.org.ua>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""extend schemes with shortcuts to repository swarms

This extension allows you to specify shortcuts for parent URLs with a
lot of repositories to act like a scheme, for example::

  [schemes]
  py = http://code.python.org/hg/

After that you can use it like::

  hg clone py://trunk/

Additionally there is support for some more complex schemas, for
example used by Google Code::

  [schemes]
  gcode = http://{1}.googlecode.com/hg/

The syntax is taken from Mercurial templates, and you have unlimited
number of variables, starting with ``{1}`` and continuing with
``{2}``, ``{3}`` and so on. This variables will receive parts of URL
supplied, split by ``/``. Anything not specified as ``{part}`` will be
just appended to an URL.

For convenience, the extension adds these schemes by default::

  [schemes]
  py = http://hg.python.org/
  bb = https://bitbucket.org/
  bb+ssh = ssh://hg@bitbucket.org/
  gcode = https://{1}.googlecode.com/hg/
  kiln = https://{1}.kilnhg.com/Repo/

You can override a predefined scheme by defining a new scheme with the
same name.
"""

import os
import re

from mercurial.i18n import _
from mercurial import (
    error,
    extensions,
    hg,
    pycompat,
    registrar,
    templater,
)
from mercurial.utils import (
    urlutil,
)

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

# matches template placeholders such as {1}, {2}, ... in a scheme URL
_partre = re.compile(br'{(\d+)\}')
68
68
69
69
class ShortRepository:
    """Expand a shortcut scheme URL and dispatch to the real backend.

    Instances are registered in Mercurial's scheme tables so that a URL
    like ``py://trunk/`` is rewritten with the configured template before
    the normal repository/peer lookup happens.
    """

    def __init__(self, url, scheme, templater):
        self.scheme = scheme
        self.templater = templater
        self.url = url
        try:
            # The highest {N} placeholder tells us how many leading path
            # components the template consumes.
            self.parts = max(map(int, _partre.findall(self.url)))
        except ValueError:
            # max() raised on an empty sequence: no placeholders at all.
            self.parts = 0

    def __repr__(self):
        return b'<ShortRepository: %s>' % self.scheme

    def instance(self, ui, url, create, intents=None, createopts=None):
        """Expand *url* and delegate instance creation to the real class."""
        url = self.resolve(url)
        return hg._peerlookup(url).instance(
            ui, url, create, intents=intents, createopts=createopts
        )

    def resolve(self, url):
        """Return *url* with the scheme shortcut expanded via the template."""
        # Should this use the urlutil.url class, or is manual parsing better?
        try:
            url = url.split(b'://', 1)[1]
        except IndexError:
            raise error.Abort(_(b"no '://' in scheme url '%s'") % url)
        # Split off exactly as many components as the template consumes;
        # whatever is left over is appended verbatim.
        parts = url.split(b'/', self.parts)
        if len(parts) > self.parts:
            tail = parts[-1]
            parts = parts[:-1]
        else:
            tail = b''
        context = {b'%d' % (i + 1): v for i, v in enumerate(parts)}
        return b''.join(self.templater.process(self.url, context)) + tail
103
103
104
104
def hasdriveletter(orig, path):
    """Wrapped ``urlutil.hasdriveletter`` that is scheme-aware.

    A configured scheme prefix such as ``py:`` must not be mistaken for a
    Windows drive letter, so report False for any path starting with a
    registered scheme followed by ``:`` and defer to *orig* otherwise.
    """
    if path:
        for scheme in schemes:
            if path.startswith(scheme + b':'):
                return False
    return orig(path)
111
111
112
112
# Schemes shipped by default; user configuration from the [schemes]
# section is merged on top of these in extsetup().
schemes = {
    b'py': b'http://hg.python.org/',
    b'bb': b'https://bitbucket.org/',
    b'bb+ssh': b'ssh://hg@bitbucket.org/',
    b'gcode': b'https://{1}.googlecode.com/hg/',
    b'kiln': b'https://{1}.kilnhg.com/Repo/',
}
120
120
121
121
def extsetup(ui):
    """Register configured shortcut schemes with Mercurial.

    Each scheme is placed in either ``hg.peer_schemes`` or
    ``hg.repo_schemes`` depending on the scheme of the URL it expands to,
    so the shortcut inherits the peer/repo classification of its target.
    """
    schemes.update(dict(ui.configitems(b'schemes')))
    t = templater.engine(templater.parse)
    for scheme, url in schemes.items():
        if (
            pycompat.iswindows
            and len(scheme) == 1
            and scheme.isalpha()
            and os.path.exists(b'%s:\\' % scheme)
        ):
            raise error.Abort(
                _(
                    b'custom scheme %s:// conflicts with drive '
                    b'letter %s:\\\n'
                )
                % (scheme, scheme.upper())
            )
        url_scheme = urlutil.url(url).scheme
        if url_scheme in hg.peer_schemes:
            hg.peer_schemes[scheme] = ShortRepository(url, scheme, t)
        else:
            hg.repo_schemes[scheme] = ShortRepository(url, scheme, t)

    extensions.wrapfunction(urlutil, b'hasdriveletter', hasdriveletter)
142
146
143
147
@command(b'debugexpandscheme', norepo=True)
def expandscheme(ui, url, **opts):
    """given a repo path, provide the scheme-expanded path"""
    scheme = urlutil.url(url).scheme
    # Look in the peer table first, then the repo table, mirroring the
    # lookup order used by hg itself.
    if scheme in hg.peer_schemes:
        cls = hg.peer_schemes[scheme]
    else:
        cls = hg.repo_schemes.get(scheme)
    # Only our own shortcut entries know how to expand; builtin scheme
    # classes are passed through untouched.
    if cls is not None and isinstance(cls, ShortRepository):
        url = cls.resolve(url)
    ui.write(url + b'\n')
@@ -1,1618 +1,1624
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import os
import posixpath
import shutil
import stat
import weakref

from .i18n import _
from .node import (
    hex,
    sha1nodeconstants,
    short,
)
from .pycompat import getattr

from . import (
    bookmarks,
    bundlerepo,
    cmdutil,
    destutil,
    discovery,
    error,
    exchange,
    extensions,
    graphmod,
    httppeer,
    localrepo,
    lock,
    logcmdutil,
    logexchange,
    merge as mergemod,
    mergestate as mergestatemod,
    narrowspec,
    phases,
    requirements,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
    vfs as vfsmod,
)
from .interfaces import repository as repositorymod
from .utils import (
    hashutil,
    stringutil,
    urlutil,
)


release = lock.release

# shared features
sharedbookmarks = b'bookmarks'
66
67
67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against a remote's branchmap.

    *branches* is a ``(hashbranch, branches)`` pair as produced by URL
    parsing. Returns a ``(revs, checkout)`` pair where *revs* includes
    the heads of each named branch and *checkout* is the first of them
    (or None when nothing was requested).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related requested: pass revs through unchanged
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # Append the heads of *branch* (newest first) to revs; returns
        # whether the branch exists remotely.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # hashbranch may be a branch name or a raw hash; fall back to
        # treating it as a revision when it is not a known branch.
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
110
110
111
111
112 def _isfile(path):
112 def _isfile(path):
113 try:
113 try:
114 # we use os.stat() directly here instead of os.path.isfile()
114 # we use os.stat() directly here instead of os.path.isfile()
115 # because the latter started returning `False` on invalid path
115 # because the latter started returning `False` on invalid path
116 # exceptions starting in 3.8 and we care about handling
116 # exceptions starting in 3.8 and we care about handling
117 # invalid paths specially here.
117 # invalid paths specially here.
118 st = os.stat(path)
118 st = os.stat(path)
119 except ValueError as e:
119 except ValueError as e:
120 msg = stringutil.forcebytestr(e)
120 msg = stringutil.forcebytestr(e)
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
122 except OSError:
122 except OSError:
123 return False
123 return False
124 else:
124 else:
125 return stat.S_ISREG(st.st_mode)
125 return stat.S_ISREG(st.st_mode)
126
126
127
127
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        path = util.expandpath(urlutil.urllocalpath(path))
        # a regular file at that path is a bundle, which is not "local"
        return not _isfile(path)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        path = util.expandpath(urlutil.urllocalpath(path))
        if _isfile(path):
            cls = bundlerepo
        else:
            cls = localrepo
        return cls.instance(ui, path, *args, **kwargs)
144
144
145
145
# Scheme tables split by kind (this commit's change): repo_schemes map to
# repository factories, peer_schemes to wire peer factories. Extensions
# (e.g. the schemes extension) add entries to either table.
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
    b'static-http': statichttprepo,
}

peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
}
155
158
156
159
def _peerlookup(path):
    """Return the peer or repository factory class for *path*.

    Peer schemes take precedence over repo schemes; anything unknown is
    treated as a local path/bundle via LocalFactory.
    """
    u = urlutil.url(path)
    scheme = u.scheme or b'file'
    if scheme in peer_schemes:
        return peer_schemes[scheme]
    if scheme in repo_schemes:
        return repo_schemes[scheme]
    return LocalFactory
162
168
163
169
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        cls = _peerlookup(repo)
        cls.instance  # make sure we load the module
        if util.safehasattr(cls, 'islocal'):
            return cls.islocal(repo)  # pytype: disable=module-attr
        return False
    # passing a repo object here is deprecated; callers should use local()
    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
    return repo.local()
174
180
175
181
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), b'rb')
    else:
        return url.open(ui, path, sendaccept=sendaccept)
183
189
184
190
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
187
193
188
194
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    cls = _peerlookup(path)
    obj = cls.instance(ui, path, create, intents=intents, createopts=createopts)
    _setup_repo_or_peer(ui, obj, presetupfuncs)
    return obj
197
203
198
204
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run extension reposetup hooks (and wire peer setup) on *obj*."""
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b'  > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
217
223
218
224
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    peer = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = peer.local()
    if not repo:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or peer.url())
        )
    return repo.filtered(b'visible')
242
248
243
249
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    ).peer()
250
256
251
257
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if not path:
        return b''
    return os.path.basename(os.path.normpath(path))
272
278
273
279
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    # cache the result for subsequent lookups
    repo.srcrepo = srcrepo
    return srcrepo
291
297
292
298
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen the destination so it picks up the shared configuration
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
344
350
345
351
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
366
372
367
373
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
414
420
415
421
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
432
438
433
439
434 def _postshareupdate(repo, update, checkout=None):
440 def _postshareupdate(repo, update, checkout=None):
435 """Maybe perform a working directory update after a shared repo is created.
441 """Maybe perform a working directory update after a shared repo is created.
436
442
437 ``update`` can be a boolean or a revision to update to.
443 ``update`` can be a boolean or a revision to update to.
438 """
444 """
439 if not update:
445 if not update:
440 return
446 return
441
447
442 repo.ui.status(_(b"updating working directory\n"))
448 repo.ui.status(_(b"updating working directory\n"))
443 if update is not True:
449 if update is not True:
444 checkout = update
450 checkout = update
445 for test in (checkout, b'default', b'tip'):
451 for test in (checkout, b'default', b'tip'):
446 if test is None:
452 if test is None:
447 continue
453 continue
448 try:
454 try:
449 uprev = repo.lookup(test)
455 uprev = repo.lookup(test)
450 break
456 break
451 except error.RepoLookupError:
457 except error.RepoLookupError:
452 continue
458 continue
453 _update(repo, uprev)
459 _update(repo, uprev)
454
460
455
461
456 def copystore(ui, srcrepo, destpath):
462 def copystore(ui, srcrepo, destpath):
457 """copy files from store of srcrepo in destpath
463 """copy files from store of srcrepo in destpath
458
464
459 returns destlock
465 returns destlock
460 """
466 """
461 destlock = None
467 destlock = None
462 try:
468 try:
463 hardlink = None
469 hardlink = None
464 topic = _(b'linking') if hardlink else _(b'copying')
470 topic = _(b'linking') if hardlink else _(b'copying')
465 with ui.makeprogress(topic, unit=_(b'files')) as progress:
471 with ui.makeprogress(topic, unit=_(b'files')) as progress:
466 num = 0
472 num = 0
467 srcpublishing = srcrepo.publishing()
473 srcpublishing = srcrepo.publishing()
468 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
474 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
469 dstvfs = vfsmod.vfs(destpath)
475 dstvfs = vfsmod.vfs(destpath)
470 for f in srcrepo.store.copylist():
476 for f in srcrepo.store.copylist():
471 if srcpublishing and f.endswith(b'phaseroots'):
477 if srcpublishing and f.endswith(b'phaseroots'):
472 continue
478 continue
473 dstbase = os.path.dirname(f)
479 dstbase = os.path.dirname(f)
474 if dstbase and not dstvfs.exists(dstbase):
480 if dstbase and not dstvfs.exists(dstbase):
475 dstvfs.mkdir(dstbase)
481 dstvfs.mkdir(dstbase)
476 if srcvfs.exists(f):
482 if srcvfs.exists(f):
477 if f.endswith(b'data'):
483 if f.endswith(b'data'):
478 # 'dstbase' may be empty (e.g. revlog format 0)
484 # 'dstbase' may be empty (e.g. revlog format 0)
479 lockfile = os.path.join(dstbase, b"lock")
485 lockfile = os.path.join(dstbase, b"lock")
480 # lock to avoid premature writing to the target
486 # lock to avoid premature writing to the target
481 destlock = lock.lock(dstvfs, lockfile)
487 destlock = lock.lock(dstvfs, lockfile)
482 hardlink, n = util.copyfiles(
488 hardlink, n = util.copyfiles(
483 srcvfs.join(f), dstvfs.join(f), hardlink, progress
489 srcvfs.join(f), dstvfs.join(f), hardlink, progress
484 )
490 )
485 num += n
491 num += n
486 if hardlink:
492 if hardlink:
487 ui.debug(b"linked %d files\n" % num)
493 ui.debug(b"linked %d files\n" % num)
488 else:
494 else:
489 ui.debug(b"copied %d files\n" % num)
495 ui.debug(b"copied %d files\n" % num)
490 return destlock
496 return destlock
491 except: # re-raises
497 except: # re-raises
492 release(destlock)
498 release(destlock)
493 raise
499 raise
494
500
495
501
496 def clonewithshare(
502 def clonewithshare(
497 ui,
503 ui,
498 peeropts,
504 peeropts,
499 sharepath,
505 sharepath,
500 source,
506 source,
501 srcpeer,
507 srcpeer,
502 dest,
508 dest,
503 pull=False,
509 pull=False,
504 rev=None,
510 rev=None,
505 update=True,
511 update=True,
506 stream=False,
512 stream=False,
507 ):
513 ):
508 """Perform a clone using a shared repo.
514 """Perform a clone using a shared repo.
509
515
510 The store for the repository will be located at <sharepath>/.hg. The
516 The store for the repository will be located at <sharepath>/.hg. The
511 specified revisions will be cloned or pulled from "source". A shared repo
517 specified revisions will be cloned or pulled from "source". A shared repo
512 will be created at "dest" and a working copy will be created if "update" is
518 will be created at "dest" and a working copy will be created if "update" is
513 True.
519 True.
514 """
520 """
515 revs = None
521 revs = None
516 if rev:
522 if rev:
517 if not srcpeer.capable(b'lookup'):
523 if not srcpeer.capable(b'lookup'):
518 raise error.Abort(
524 raise error.Abort(
519 _(
525 _(
520 b"src repository does not support "
526 b"src repository does not support "
521 b"revision lookup and so doesn't "
527 b"revision lookup and so doesn't "
522 b"support clone by revision"
528 b"support clone by revision"
523 )
529 )
524 )
530 )
525
531
526 # TODO this is batchable.
532 # TODO this is batchable.
527 remoterevs = []
533 remoterevs = []
528 for r in rev:
534 for r in rev:
529 with srcpeer.commandexecutor() as e:
535 with srcpeer.commandexecutor() as e:
530 remoterevs.append(
536 remoterevs.append(
531 e.callcommand(
537 e.callcommand(
532 b'lookup',
538 b'lookup',
533 {
539 {
534 b'key': r,
540 b'key': r,
535 },
541 },
536 ).result()
542 ).result()
537 )
543 )
538 revs = remoterevs
544 revs = remoterevs
539
545
540 # Obtain a lock before checking for or cloning the pooled repo otherwise
546 # Obtain a lock before checking for or cloning the pooled repo otherwise
541 # 2 clients may race creating or populating it.
547 # 2 clients may race creating or populating it.
542 pooldir = os.path.dirname(sharepath)
548 pooldir = os.path.dirname(sharepath)
543 # lock class requires the directory to exist.
549 # lock class requires the directory to exist.
544 try:
550 try:
545 util.makedir(pooldir, False)
551 util.makedir(pooldir, False)
546 except FileExistsError:
552 except FileExistsError:
547 pass
553 pass
548
554
549 poolvfs = vfsmod.vfs(pooldir)
555 poolvfs = vfsmod.vfs(pooldir)
550 basename = os.path.basename(sharepath)
556 basename = os.path.basename(sharepath)
551
557
552 with lock.lock(poolvfs, b'%s.lock' % basename):
558 with lock.lock(poolvfs, b'%s.lock' % basename):
553 if os.path.exists(sharepath):
559 if os.path.exists(sharepath):
554 ui.status(
560 ui.status(
555 _(b'(sharing from existing pooled repository %s)\n') % basename
561 _(b'(sharing from existing pooled repository %s)\n') % basename
556 )
562 )
557 else:
563 else:
558 ui.status(
564 ui.status(
559 _(b'(sharing from new pooled repository %s)\n') % basename
565 _(b'(sharing from new pooled repository %s)\n') % basename
560 )
566 )
561 # Always use pull mode because hardlinks in share mode don't work
567 # Always use pull mode because hardlinks in share mode don't work
562 # well. Never update because working copies aren't necessary in
568 # well. Never update because working copies aren't necessary in
563 # share mode.
569 # share mode.
564 clone(
570 clone(
565 ui,
571 ui,
566 peeropts,
572 peeropts,
567 source,
573 source,
568 dest=sharepath,
574 dest=sharepath,
569 pull=True,
575 pull=True,
570 revs=rev,
576 revs=rev,
571 update=False,
577 update=False,
572 stream=stream,
578 stream=stream,
573 )
579 )
574
580
575 # Resolve the value to put in [paths] section for the source.
581 # Resolve the value to put in [paths] section for the source.
576 if islocal(source):
582 if islocal(source):
577 defaultpath = util.abspath(urlutil.urllocalpath(source))
583 defaultpath = util.abspath(urlutil.urllocalpath(source))
578 else:
584 else:
579 defaultpath = source
585 defaultpath = source
580
586
581 sharerepo = repository(ui, path=sharepath)
587 sharerepo = repository(ui, path=sharepath)
582 destrepo = share(
588 destrepo = share(
583 ui,
589 ui,
584 sharerepo,
590 sharerepo,
585 dest=dest,
591 dest=dest,
586 update=False,
592 update=False,
587 bookmarks=False,
593 bookmarks=False,
588 defaultpath=defaultpath,
594 defaultpath=defaultpath,
589 )
595 )
590
596
591 # We need to perform a pull against the dest repo to fetch bookmarks
597 # We need to perform a pull against the dest repo to fetch bookmarks
592 # and other non-store data that isn't shared by default. In the case of
598 # and other non-store data that isn't shared by default. In the case of
593 # non-existing shared repo, this means we pull from the remote twice. This
599 # non-existing shared repo, this means we pull from the remote twice. This
594 # is a bit weird. But at the time it was implemented, there wasn't an easy
600 # is a bit weird. But at the time it was implemented, there wasn't an easy
595 # way to pull just non-changegroup data.
601 # way to pull just non-changegroup data.
596 exchange.pull(destrepo, srcpeer, heads=revs)
602 exchange.pull(destrepo, srcpeer, heads=revs)
597
603
598 _postshareupdate(destrepo, update)
604 _postshareupdate(destrepo, update)
599
605
600 return srcpeer, peer(ui, peeropts, dest)
606 return srcpeer, peer(ui, peeropts, dest)
601
607
602
608
603 # Recomputing caches is often slow on big repos, so copy them.
609 # Recomputing caches is often slow on big repos, so copy them.
604 def _copycache(srcrepo, dstcachedir, fname):
610 def _copycache(srcrepo, dstcachedir, fname):
605 """copy a cache from srcrepo to destcachedir (if it exists)"""
611 """copy a cache from srcrepo to destcachedir (if it exists)"""
606 srcfname = srcrepo.cachevfs.join(fname)
612 srcfname = srcrepo.cachevfs.join(fname)
607 dstfname = os.path.join(dstcachedir, fname)
613 dstfname = os.path.join(dstcachedir, fname)
608 if os.path.exists(srcfname):
614 if os.path.exists(srcfname):
609 if not os.path.exists(dstcachedir):
615 if not os.path.exists(dstcachedir):
610 os.mkdir(dstcachedir)
616 os.mkdir(dstcachedir)
611 util.copyfile(srcfname, dstfname)
617 util.copyfile(srcfname, dstfname)
612
618
613
619
614 def clone(
620 def clone(
615 ui,
621 ui,
616 peeropts,
622 peeropts,
617 source,
623 source,
618 dest=None,
624 dest=None,
619 pull=False,
625 pull=False,
620 revs=None,
626 revs=None,
621 update=True,
627 update=True,
622 stream=False,
628 stream=False,
623 branch=None,
629 branch=None,
624 shareopts=None,
630 shareopts=None,
625 storeincludepats=None,
631 storeincludepats=None,
626 storeexcludepats=None,
632 storeexcludepats=None,
627 depth=None,
633 depth=None,
628 ):
634 ):
629 """Make a copy of an existing repository.
635 """Make a copy of an existing repository.
630
636
631 Create a copy of an existing repository in a new directory. The
637 Create a copy of an existing repository in a new directory. The
632 source and destination are URLs, as passed to the repository
638 source and destination are URLs, as passed to the repository
633 function. Returns a pair of repository peers, the source and
639 function. Returns a pair of repository peers, the source and
634 newly created destination.
640 newly created destination.
635
641
636 The location of the source is added to the new repository's
642 The location of the source is added to the new repository's
637 .hg/hgrc file, as the default to be used for future pulls and
643 .hg/hgrc file, as the default to be used for future pulls and
638 pushes.
644 pushes.
639
645
640 If an exception is raised, the partly cloned/updated destination
646 If an exception is raised, the partly cloned/updated destination
641 repository will be deleted.
647 repository will be deleted.
642
648
643 Arguments:
649 Arguments:
644
650
645 source: repository object or URL
651 source: repository object or URL
646
652
647 dest: URL of destination repository to create (defaults to base
653 dest: URL of destination repository to create (defaults to base
648 name of source repository)
654 name of source repository)
649
655
650 pull: always pull from source repository, even in local case or if the
656 pull: always pull from source repository, even in local case or if the
651 server prefers streaming
657 server prefers streaming
652
658
653 stream: stream raw data uncompressed from repository (fast over
659 stream: stream raw data uncompressed from repository (fast over
654 LAN, slow over WAN)
660 LAN, slow over WAN)
655
661
656 revs: revision to clone up to (implies pull=True)
662 revs: revision to clone up to (implies pull=True)
657
663
658 update: update working directory after clone completes, if
664 update: update working directory after clone completes, if
659 destination is local repository (True means update to default rev,
665 destination is local repository (True means update to default rev,
660 anything else is treated as a revision)
666 anything else is treated as a revision)
661
667
662 branch: branches to clone
668 branch: branches to clone
663
669
664 shareopts: dict of options to control auto sharing behavior. The "pool" key
670 shareopts: dict of options to control auto sharing behavior. The "pool" key
665 activates auto sharing mode and defines the directory for stores. The
671 activates auto sharing mode and defines the directory for stores. The
666 "mode" key determines how to construct the directory name of the shared
672 "mode" key determines how to construct the directory name of the shared
667 repository. "identity" means the name is derived from the node of the first
673 repository. "identity" means the name is derived from the node of the first
668 changeset in the repository. "remote" means the name is derived from the
674 changeset in the repository. "remote" means the name is derived from the
669 remote's path/URL. Defaults to "identity."
675 remote's path/URL. Defaults to "identity."
670
676
671 storeincludepats and storeexcludepats: sets of file patterns to include and
677 storeincludepats and storeexcludepats: sets of file patterns to include and
672 exclude in the repository copy, respectively. If not defined, all files
678 exclude in the repository copy, respectively. If not defined, all files
673 will be included (a "full" clone). Otherwise a "narrow" clone containing
679 will be included (a "full" clone). Otherwise a "narrow" clone containing
674 only the requested files will be performed. If ``storeincludepats`` is not
680 only the requested files will be performed. If ``storeincludepats`` is not
675 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
681 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
676 ``path:.``. If both are empty sets, no files will be cloned.
682 ``path:.``. If both are empty sets, no files will be cloned.
677 """
683 """
678
684
679 if isinstance(source, bytes):
685 if isinstance(source, bytes):
680 src = urlutil.get_clone_path(ui, source, branch)
686 src = urlutil.get_clone_path(ui, source, branch)
681 origsource, source, branches = src
687 origsource, source, branches = src
682 srcpeer = peer(ui, peeropts, source)
688 srcpeer = peer(ui, peeropts, source)
683 else:
689 else:
684 srcpeer = source.peer() # in case we were called with a localrepo
690 srcpeer = source.peer() # in case we were called with a localrepo
685 branches = (None, branch or [])
691 branches = (None, branch or [])
686 origsource = source = srcpeer.url()
692 origsource = source = srcpeer.url()
687 srclock = destlock = destwlock = cleandir = None
693 srclock = destlock = destwlock = cleandir = None
688 destpeer = None
694 destpeer = None
689 try:
695 try:
690 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
696 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
691
697
692 if dest is None:
698 if dest is None:
693 dest = defaultdest(source)
699 dest = defaultdest(source)
694 if dest:
700 if dest:
695 ui.status(_(b"destination directory: %s\n") % dest)
701 ui.status(_(b"destination directory: %s\n") % dest)
696 else:
702 else:
697 dest = urlutil.get_clone_path(ui, dest)[0]
703 dest = urlutil.get_clone_path(ui, dest)[0]
698
704
699 dest = urlutil.urllocalpath(dest)
705 dest = urlutil.urllocalpath(dest)
700 source = urlutil.urllocalpath(source)
706 source = urlutil.urllocalpath(source)
701
707
702 if not dest:
708 if not dest:
703 raise error.InputError(_(b"empty destination path is not valid"))
709 raise error.InputError(_(b"empty destination path is not valid"))
704
710
705 destvfs = vfsmod.vfs(dest, expandpath=True)
711 destvfs = vfsmod.vfs(dest, expandpath=True)
706 if destvfs.lexists():
712 if destvfs.lexists():
707 if not destvfs.isdir():
713 if not destvfs.isdir():
708 raise error.InputError(
714 raise error.InputError(
709 _(b"destination '%s' already exists") % dest
715 _(b"destination '%s' already exists") % dest
710 )
716 )
711 elif destvfs.listdir():
717 elif destvfs.listdir():
712 raise error.InputError(
718 raise error.InputError(
713 _(b"destination '%s' is not empty") % dest
719 _(b"destination '%s' is not empty") % dest
714 )
720 )
715
721
716 createopts = {}
722 createopts = {}
717 narrow = False
723 narrow = False
718
724
719 if storeincludepats is not None:
725 if storeincludepats is not None:
720 narrowspec.validatepatterns(storeincludepats)
726 narrowspec.validatepatterns(storeincludepats)
721 narrow = True
727 narrow = True
722
728
723 if storeexcludepats is not None:
729 if storeexcludepats is not None:
724 narrowspec.validatepatterns(storeexcludepats)
730 narrowspec.validatepatterns(storeexcludepats)
725 narrow = True
731 narrow = True
726
732
727 if narrow:
733 if narrow:
728 # Include everything by default if only exclusion patterns defined.
734 # Include everything by default if only exclusion patterns defined.
729 if storeexcludepats and not storeincludepats:
735 if storeexcludepats and not storeincludepats:
730 storeincludepats = {b'path:.'}
736 storeincludepats = {b'path:.'}
731
737
732 createopts[b'narrowfiles'] = True
738 createopts[b'narrowfiles'] = True
733
739
734 if depth:
740 if depth:
735 createopts[b'shallowfilestore'] = True
741 createopts[b'shallowfilestore'] = True
736
742
737 if srcpeer.capable(b'lfs-serve'):
743 if srcpeer.capable(b'lfs-serve'):
738 # Repository creation honors the config if it disabled the extension, so
744 # Repository creation honors the config if it disabled the extension, so
739 # we can't just announce that lfs will be enabled. This check avoids
745 # we can't just announce that lfs will be enabled. This check avoids
740 # saying that lfs will be enabled, and then saying it's an unknown
746 # saying that lfs will be enabled, and then saying it's an unknown
741 # feature. The lfs creation option is set in either case so that a
747 # feature. The lfs creation option is set in either case so that a
742 # requirement is added. If the extension is explicitly disabled but the
748 # requirement is added. If the extension is explicitly disabled but the
743 # requirement is set, the clone aborts early, before transferring any
749 # requirement is set, the clone aborts early, before transferring any
744 # data.
750 # data.
745 createopts[b'lfs'] = True
751 createopts[b'lfs'] = True
746
752
747 if extensions.disabled_help(b'lfs'):
753 if extensions.disabled_help(b'lfs'):
748 ui.status(
754 ui.status(
749 _(
755 _(
750 b'(remote is using large file support (lfs), but it is '
756 b'(remote is using large file support (lfs), but it is '
751 b'explicitly disabled in the local configuration)\n'
757 b'explicitly disabled in the local configuration)\n'
752 )
758 )
753 )
759 )
754 else:
760 else:
755 ui.status(
761 ui.status(
756 _(
762 _(
757 b'(remote is using large file support (lfs); lfs will '
763 b'(remote is using large file support (lfs); lfs will '
758 b'be enabled for this repository)\n'
764 b'be enabled for this repository)\n'
759 )
765 )
760 )
766 )
761
767
762 shareopts = shareopts or {}
768 shareopts = shareopts or {}
763 sharepool = shareopts.get(b'pool')
769 sharepool = shareopts.get(b'pool')
764 sharenamemode = shareopts.get(b'mode')
770 sharenamemode = shareopts.get(b'mode')
765 if sharepool and islocal(dest):
771 if sharepool and islocal(dest):
766 sharepath = None
772 sharepath = None
767 if sharenamemode == b'identity':
773 if sharenamemode == b'identity':
768 # Resolve the name from the initial changeset in the remote
774 # Resolve the name from the initial changeset in the remote
769 # repository. This returns nullid when the remote is empty. It
775 # repository. This returns nullid when the remote is empty. It
770 # raises RepoLookupError if revision 0 is filtered or otherwise
776 # raises RepoLookupError if revision 0 is filtered or otherwise
771 # not available. If we fail to resolve, sharing is not enabled.
777 # not available. If we fail to resolve, sharing is not enabled.
772 try:
778 try:
773 with srcpeer.commandexecutor() as e:
779 with srcpeer.commandexecutor() as e:
774 rootnode = e.callcommand(
780 rootnode = e.callcommand(
775 b'lookup',
781 b'lookup',
776 {
782 {
777 b'key': b'0',
783 b'key': b'0',
778 },
784 },
779 ).result()
785 ).result()
780
786
781 if rootnode != sha1nodeconstants.nullid:
787 if rootnode != sha1nodeconstants.nullid:
782 sharepath = os.path.join(sharepool, hex(rootnode))
788 sharepath = os.path.join(sharepool, hex(rootnode))
783 else:
789 else:
784 ui.status(
790 ui.status(
785 _(
791 _(
786 b'(not using pooled storage: '
792 b'(not using pooled storage: '
787 b'remote appears to be empty)\n'
793 b'remote appears to be empty)\n'
788 )
794 )
789 )
795 )
790 except error.RepoLookupError:
796 except error.RepoLookupError:
791 ui.status(
797 ui.status(
792 _(
798 _(
793 b'(not using pooled storage: '
799 b'(not using pooled storage: '
794 b'unable to resolve identity of remote)\n'
800 b'unable to resolve identity of remote)\n'
795 )
801 )
796 )
802 )
797 elif sharenamemode == b'remote':
803 elif sharenamemode == b'remote':
798 sharepath = os.path.join(
804 sharepath = os.path.join(
799 sharepool, hex(hashutil.sha1(source).digest())
805 sharepool, hex(hashutil.sha1(source).digest())
800 )
806 )
801 else:
807 else:
802 raise error.Abort(
808 raise error.Abort(
803 _(b'unknown share naming mode: %s') % sharenamemode
809 _(b'unknown share naming mode: %s') % sharenamemode
804 )
810 )
805
811
806 # TODO this is a somewhat arbitrary restriction.
812 # TODO this is a somewhat arbitrary restriction.
807 if narrow:
813 if narrow:
808 ui.status(
814 ui.status(
809 _(b'(pooled storage not supported for narrow clones)\n')
815 _(b'(pooled storage not supported for narrow clones)\n')
810 )
816 )
811 sharepath = None
817 sharepath = None
812
818
813 if sharepath:
819 if sharepath:
814 return clonewithshare(
820 return clonewithshare(
815 ui,
821 ui,
816 peeropts,
822 peeropts,
817 sharepath,
823 sharepath,
818 source,
824 source,
819 srcpeer,
825 srcpeer,
820 dest,
826 dest,
821 pull=pull,
827 pull=pull,
822 rev=revs,
828 rev=revs,
823 update=update,
829 update=update,
824 stream=stream,
830 stream=stream,
825 )
831 )
826
832
827 srcrepo = srcpeer.local()
833 srcrepo = srcpeer.local()
828
834
829 abspath = origsource
835 abspath = origsource
830 if islocal(origsource):
836 if islocal(origsource):
831 abspath = util.abspath(urlutil.urllocalpath(origsource))
837 abspath = util.abspath(urlutil.urllocalpath(origsource))
832
838
833 if islocal(dest):
839 if islocal(dest):
834 if os.path.exists(dest):
840 if os.path.exists(dest):
835 # only clean up directories we create ourselves
841 # only clean up directories we create ourselves
836 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
842 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
837 cleandir = hgdir
843 cleandir = hgdir
838 else:
844 else:
839 cleandir = dest
845 cleandir = dest
840
846
841 copy = False
847 copy = False
842 if (
848 if (
843 srcrepo
849 srcrepo
844 and srcrepo.cancopy()
850 and srcrepo.cancopy()
845 and islocal(dest)
851 and islocal(dest)
846 and not phases.hassecret(srcrepo)
852 and not phases.hassecret(srcrepo)
847 ):
853 ):
848 copy = not pull and not revs
854 copy = not pull and not revs
849
855
850 # TODO this is a somewhat arbitrary restriction.
856 # TODO this is a somewhat arbitrary restriction.
851 if narrow:
857 if narrow:
852 copy = False
858 copy = False
853
859
854 if copy:
860 if copy:
855 try:
861 try:
856 # we use a lock here because if we race with commit, we
862 # we use a lock here because if we race with commit, we
857 # can end up with extra data in the cloned revlogs that's
863 # can end up with extra data in the cloned revlogs that's
858 # not pointed to by changesets, thus causing verify to
864 # not pointed to by changesets, thus causing verify to
859 # fail
865 # fail
860 srclock = srcrepo.lock(wait=False)
866 srclock = srcrepo.lock(wait=False)
861 except error.LockError:
867 except error.LockError:
862 copy = False
868 copy = False
863
869
864 if copy:
870 if copy:
865 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
871 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
866
872
867 destrootpath = urlutil.urllocalpath(dest)
873 destrootpath = urlutil.urllocalpath(dest)
868 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
874 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
869 localrepo.createrepository(
875 localrepo.createrepository(
870 ui,
876 ui,
871 destrootpath,
877 destrootpath,
872 requirements=dest_reqs,
878 requirements=dest_reqs,
873 )
879 )
874 destrepo = localrepo.makelocalrepository(ui, destrootpath)
880 destrepo = localrepo.makelocalrepository(ui, destrootpath)
875
881
876 destwlock = destrepo.wlock()
882 destwlock = destrepo.wlock()
877 destlock = destrepo.lock()
883 destlock = destrepo.lock()
878 from . import streamclone # avoid cycle
884 from . import streamclone # avoid cycle
879
885
880 streamclone.local_copy(srcrepo, destrepo)
886 streamclone.local_copy(srcrepo, destrepo)
881
887
882 # we need to re-init the repo after manually copying the data
888 # we need to re-init the repo after manually copying the data
883 # into it
889 # into it
884 destpeer = peer(srcrepo, peeropts, dest)
890 destpeer = peer(srcrepo, peeropts, dest)
885
891
886 # make the peer aware that is it already locked
892 # make the peer aware that is it already locked
887 #
893 #
888 # important:
894 # important:
889 #
895 #
890 # We still need to release that lock at the end of the function
896 # We still need to release that lock at the end of the function
891 destpeer.local()._lockref = weakref.ref(destlock)
897 destpeer.local()._lockref = weakref.ref(destlock)
892 destpeer.local()._wlockref = weakref.ref(destwlock)
898 destpeer.local()._wlockref = weakref.ref(destwlock)
893 # dirstate also needs to be copied because `_wlockref` has a reference
899 # dirstate also needs to be copied because `_wlockref` has a reference
894 # to it: this dirstate is saved to disk when the wlock is released
900 # to it: this dirstate is saved to disk when the wlock is released
895 destpeer.local().dirstate = destrepo.dirstate
901 destpeer.local().dirstate = destrepo.dirstate
896
902
897 srcrepo.hook(
903 srcrepo.hook(
898 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
904 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
899 )
905 )
900 else:
906 else:
901 try:
907 try:
902 # only pass ui when no srcrepo
908 # only pass ui when no srcrepo
903 destpeer = peer(
909 destpeer = peer(
904 srcrepo or ui,
910 srcrepo or ui,
905 peeropts,
911 peeropts,
906 dest,
912 dest,
907 create=True,
913 create=True,
908 createopts=createopts,
914 createopts=createopts,
909 )
915 )
910 except FileExistsError:
916 except FileExistsError:
911 cleandir = None
917 cleandir = None
912 raise error.Abort(_(b"destination '%s' already exists") % dest)
918 raise error.Abort(_(b"destination '%s' already exists") % dest)
913
919
914 if revs:
920 if revs:
915 if not srcpeer.capable(b'lookup'):
921 if not srcpeer.capable(b'lookup'):
916 raise error.Abort(
922 raise error.Abort(
917 _(
923 _(
918 b"src repository does not support "
924 b"src repository does not support "
919 b"revision lookup and so doesn't "
925 b"revision lookup and so doesn't "
920 b"support clone by revision"
926 b"support clone by revision"
921 )
927 )
922 )
928 )
923
929
924 # TODO this is batchable.
930 # TODO this is batchable.
925 remoterevs = []
931 remoterevs = []
926 for rev in revs:
932 for rev in revs:
927 with srcpeer.commandexecutor() as e:
933 with srcpeer.commandexecutor() as e:
928 remoterevs.append(
934 remoterevs.append(
929 e.callcommand(
935 e.callcommand(
930 b'lookup',
936 b'lookup',
931 {
937 {
932 b'key': rev,
938 b'key': rev,
933 },
939 },
934 ).result()
940 ).result()
935 )
941 )
936 revs = remoterevs
942 revs = remoterevs
937
943
938 checkout = revs[0]
944 checkout = revs[0]
939 else:
945 else:
940 revs = None
946 revs = None
941 local = destpeer.local()
947 local = destpeer.local()
942 if local:
948 if local:
943 if narrow:
949 if narrow:
944 with local.wlock(), local.lock():
950 with local.wlock(), local.lock():
945 local.setnarrowpats(storeincludepats, storeexcludepats)
951 local.setnarrowpats(storeincludepats, storeexcludepats)
946 narrowspec.copytoworkingcopy(local)
952 narrowspec.copytoworkingcopy(local)
947
953
948 u = urlutil.url(abspath)
954 u = urlutil.url(abspath)
949 defaulturl = bytes(u)
955 defaulturl = bytes(u)
950 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
956 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
951 if not stream:
957 if not stream:
952 if pull:
958 if pull:
953 stream = False
959 stream = False
954 else:
960 else:
955 stream = None
961 stream = None
956 # internal config: ui.quietbookmarkmove
962 # internal config: ui.quietbookmarkmove
957 overrides = {(b'ui', b'quietbookmarkmove'): True}
963 overrides = {(b'ui', b'quietbookmarkmove'): True}
958 with local.ui.configoverride(overrides, b'clone'):
964 with local.ui.configoverride(overrides, b'clone'):
959 exchange.pull(
965 exchange.pull(
960 local,
966 local,
961 srcpeer,
967 srcpeer,
962 heads=revs,
968 heads=revs,
963 streamclonerequested=stream,
969 streamclonerequested=stream,
964 includepats=storeincludepats,
970 includepats=storeincludepats,
965 excludepats=storeexcludepats,
971 excludepats=storeexcludepats,
966 depth=depth,
972 depth=depth,
967 )
973 )
968 elif srcrepo:
974 elif srcrepo:
969 # TODO lift restriction once exchange.push() accepts narrow
975 # TODO lift restriction once exchange.push() accepts narrow
970 # push.
976 # push.
971 if narrow:
977 if narrow:
972 raise error.Abort(
978 raise error.Abort(
973 _(
979 _(
974 b'narrow clone not available for '
980 b'narrow clone not available for '
975 b'remote destinations'
981 b'remote destinations'
976 )
982 )
977 )
983 )
978
984
979 exchange.push(
985 exchange.push(
980 srcrepo,
986 srcrepo,
981 destpeer,
987 destpeer,
982 revs=revs,
988 revs=revs,
983 bookmarks=srcrepo._bookmarks.keys(),
989 bookmarks=srcrepo._bookmarks.keys(),
984 )
990 )
985 else:
991 else:
986 raise error.Abort(
992 raise error.Abort(
987 _(b"clone from remote to remote not supported")
993 _(b"clone from remote to remote not supported")
988 )
994 )
989
995
990 cleandir = None
996 cleandir = None
991
997
992 destrepo = destpeer.local()
998 destrepo = destpeer.local()
993 if destrepo:
999 if destrepo:
994 template = uimod.samplehgrcs[b'cloned']
1000 template = uimod.samplehgrcs[b'cloned']
995 u = urlutil.url(abspath)
1001 u = urlutil.url(abspath)
996 u.passwd = None
1002 u.passwd = None
997 defaulturl = bytes(u)
1003 defaulturl = bytes(u)
998 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1004 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
999 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1005 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1000
1006
1001 if ui.configbool(b'experimental', b'remotenames'):
1007 if ui.configbool(b'experimental', b'remotenames'):
1002 logexchange.pullremotenames(destrepo, srcpeer)
1008 logexchange.pullremotenames(destrepo, srcpeer)
1003
1009
1004 if update:
1010 if update:
1005 if update is not True:
1011 if update is not True:
1006 with srcpeer.commandexecutor() as e:
1012 with srcpeer.commandexecutor() as e:
1007 checkout = e.callcommand(
1013 checkout = e.callcommand(
1008 b'lookup',
1014 b'lookup',
1009 {
1015 {
1010 b'key': update,
1016 b'key': update,
1011 },
1017 },
1012 ).result()
1018 ).result()
1013
1019
1014 uprev = None
1020 uprev = None
1015 status = None
1021 status = None
1016 if checkout is not None:
1022 if checkout is not None:
1017 # Some extensions (at least hg-git and hg-subversion) have
1023 # Some extensions (at least hg-git and hg-subversion) have
1018 # a peer.lookup() implementation that returns a name instead
1024 # a peer.lookup() implementation that returns a name instead
1019 # of a nodeid. We work around it here until we've figured
1025 # of a nodeid. We work around it here until we've figured
1020 # out a better solution.
1026 # out a better solution.
1021 if len(checkout) == 20 and checkout in destrepo:
1027 if len(checkout) == 20 and checkout in destrepo:
1022 uprev = checkout
1028 uprev = checkout
1023 elif scmutil.isrevsymbol(destrepo, checkout):
1029 elif scmutil.isrevsymbol(destrepo, checkout):
1024 uprev = scmutil.revsymbol(destrepo, checkout).node()
1030 uprev = scmutil.revsymbol(destrepo, checkout).node()
1025 else:
1031 else:
1026 if update is not True:
1032 if update is not True:
1027 try:
1033 try:
1028 uprev = destrepo.lookup(update)
1034 uprev = destrepo.lookup(update)
1029 except error.RepoLookupError:
1035 except error.RepoLookupError:
1030 pass
1036 pass
1031 if uprev is None:
1037 if uprev is None:
1032 try:
1038 try:
1033 if destrepo._activebookmark:
1039 if destrepo._activebookmark:
1034 uprev = destrepo.lookup(destrepo._activebookmark)
1040 uprev = destrepo.lookup(destrepo._activebookmark)
1035 update = destrepo._activebookmark
1041 update = destrepo._activebookmark
1036 else:
1042 else:
1037 uprev = destrepo._bookmarks[b'@']
1043 uprev = destrepo._bookmarks[b'@']
1038 update = b'@'
1044 update = b'@'
1039 bn = destrepo[uprev].branch()
1045 bn = destrepo[uprev].branch()
1040 if bn == b'default':
1046 if bn == b'default':
1041 status = _(b"updating to bookmark %s\n" % update)
1047 status = _(b"updating to bookmark %s\n" % update)
1042 else:
1048 else:
1043 status = (
1049 status = (
1044 _(b"updating to bookmark %s on branch %s\n")
1050 _(b"updating to bookmark %s on branch %s\n")
1045 ) % (update, bn)
1051 ) % (update, bn)
1046 except KeyError:
1052 except KeyError:
1047 try:
1053 try:
1048 uprev = destrepo.branchtip(b'default')
1054 uprev = destrepo.branchtip(b'default')
1049 except error.RepoLookupError:
1055 except error.RepoLookupError:
1050 uprev = destrepo.lookup(b'tip')
1056 uprev = destrepo.lookup(b'tip')
1051 if not status:
1057 if not status:
1052 bn = destrepo[uprev].branch()
1058 bn = destrepo[uprev].branch()
1053 status = _(b"updating to branch %s\n") % bn
1059 status = _(b"updating to branch %s\n") % bn
1054 destrepo.ui.status(status)
1060 destrepo.ui.status(status)
1055 _update(destrepo, uprev)
1061 _update(destrepo, uprev)
1056 if update in destrepo._bookmarks:
1062 if update in destrepo._bookmarks:
1057 bookmarks.activate(destrepo, update)
1063 bookmarks.activate(destrepo, update)
1058 if destlock is not None:
1064 if destlock is not None:
1059 release(destlock)
1065 release(destlock)
1060 if destwlock is not None:
1066 if destwlock is not None:
1061 release(destlock)
1067 release(destlock)
1062 # here is a tiny windows were someone could end up writing the
1068 # here is a tiny windows were someone could end up writing the
1063 # repository before the cache are sure to be warm. This is "fine"
1069 # repository before the cache are sure to be warm. This is "fine"
1064 # as the only "bad" outcome would be some slowness. That potential
1070 # as the only "bad" outcome would be some slowness. That potential
1065 # slowness already affect reader.
1071 # slowness already affect reader.
1066 with destrepo.lock():
1072 with destrepo.lock():
1067 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1073 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1068 finally:
1074 finally:
1069 release(srclock, destlock, destwlock)
1075 release(srclock, destlock, destwlock)
1070 if cleandir is not None:
1076 if cleandir is not None:
1071 shutil.rmtree(cleandir, True)
1077 shutil.rmtree(cleandir, True)
1072 if srcpeer is not None:
1078 if srcpeer is not None:
1073 srcpeer.close()
1079 srcpeer.close()
1074 if destpeer and destpeer.local() is None:
1080 if destpeer and destpeer.local() is None:
1075 destpeer.close()
1081 destpeer.close()
1076 return srcpeer, destpeer
1082 return srcpeer, destpeer
1077
1083
1078
1084
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge statistics on ``repo.ui``.

    When ``quietempty`` is true and ``stats`` reports no activity at all,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1094
1100
1095
1101
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated entry point kept only for backward compatibility; new
    # callers should use merge.update() or merge.clean_update() directly.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=[b'working copy', b'destination'],
        updatecheck=updatecheck,
    )
1114
1120
1115
1121
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        # point the user at the resolve workflow when file merges remain
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1123
1129
1124
1130
# naming conflict in clone(): keep a module-private alias so clone()'s
# local ``update`` flag does not shadow the function.
_update = update
1127
1133
1128
1134
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = mergemod.clean_update(repo[node])
    # a clean (forced) update can never leave unresolved file merges behind
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    # always reports "no conflicts" to callers
    return False
1136
1142
1137
1143
# naming conflict in updatetotally(): keep a module-private alias so its
# ``clean`` argument does not shadow the function.
_clean = clean
1140
1146
# The set of update-check modes accepted by updatetotally(); anything else
# configured in [commands] update.check falls back to LINEAR.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1147
1153
1148
1154
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit argument with a bogus value is a programming error
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # an active bookmark to move)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            # update succeeded: advance or drop the active bookmark
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination was named by a bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # moved away from the active bookmark: deactivate it
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1227
1233
1228
1234
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        # successful merge: nudge the user to commit the result
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved > 0
1250
1256
1251
1257
def abortmerge(ui, repo):
    """Abort an in-progress merge by force-updating back to the pre-merge
    parent."""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # the forced clean update can never leave unresolved merges behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1265
1271
1266
1272
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    srcs = list(urlutil.get_pull_paths(repo, ui, [source]))
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        # restrict the operation to a sub-path of the source
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            # local paths use OS separators, remote URLs always use '/'
            normpath = os.path.normpath if p.islocal() else posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may replace both the peer and the cleanup callback
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1328
1334
1329
1335
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``.

    Delegates the heavy lifting to _incoming(); only the display and the
    subrepo-recursion callbacks live here.
    """

    def subreporecurse():
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        shown = 0
        for n in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1359
1365
1360
1366
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute nodes missing from every destination in ``dests``.

    Returns a pair ``(outgoing_revs, others)`` where ``outgoing_revs`` is
    the union of missing nodes sorted by local revision number and
    ``others`` is the list of still-open destination peers (callers must
    close them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # NOTE(review): subpath is rebound to a url object on the first
            # iteration and reused on later ones — preserved as-is.
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                # local paths use OS separators, remote URLs always use '/'
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    cl = repo.changelog
    outgoing_revs = sorted(out, key=cl.rev)
    return outgoing_revs, others
1403
1409
1404
1410
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on each subrepo (when --subrepos is set); return the
    minimum exit code (0 as soon as any subrepo has outgoing changes)."""
    ret = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for subpath in sorted(wctx.substate):
            ret = min(ret, wctx.sub(subpath).outgoing(ui, dests, opts))
    return ret
1413
1419
1414
1420
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: pass everything through unchanged
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for n in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(n) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield n
1436
1442
1437
1443
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the specified destinations.

    Returns 0 when outgoing changes exist (here or in a subrepo), 1
    otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # _outgoing leaves its peers open; close them whatever happened
        for oth in others:
            oth.close()
1469
1475
1470
1476
def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # a failing subrepo verify makes the whole run fail
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # an unreadable .hgsubstate is reported but not fatal
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1503
1509
1504
1510
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options; command-line opts win over config
    for o in (b'ssh', b'remotecmd'):
        v = opts.get(o) or src.config(b'ui', o)
        if v:
            dst.setconfig(b"ui", o, v, b'copied')

    # copy bundle-specific options
    r = src.config(b'bundle', b'mainreporoot')
    if r:
        dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    v = src.config(b'web', b'cacerts')
    if v:
        dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')

    return dst
1533
1539
1534
1540
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
#
# Each entry is a pair (name of a repo attribute holding a directory path,
# file name joined onto it); cachedlocalrepo._repostate stats each
# resulting path to build its change-detection fingerprint.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1544
1550
1545
1551
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # Fingerprint of the files of interest at cache-creation time.
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.
        newstate, newmtime = self._repostate()
        if newstate == self._state:
            # Nothing of interest changed on disk; reuse the cached repo.
            return self._repo, False

        # Something changed: open a fresh repository at the same location
        # and re-apply the original filter (if any).
        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            fresh = fresh.filtered(self._filtername)
        else:
            fresh = fresh.unfiltered()
        self._repo = fresh
        self._state = newstate
        self.mtime = newmtime

        return self._repo, True

    def _repostate(self):
        """Return (fingerprint tuple, newest mtime) for the files of interest.

        The fingerprint is one ``(mtime, size)`` pair per entry in ``foi``.
        """
        entries = []
        newest = -1
        for attrname, fname in foi:
            directory = getattr(self._repo, attrname)
            target = os.path.join(directory, fname)
            try:
                st = os.stat(target)
            except OSError:
                # The file may not exist; stat the containing directory
                # instead so its entry still contributes to the state.
                st = os.stat(directory)
            mtime = st[stat.ST_MTIME]
            entries.append((mtime, st.st_size))
            newest = max(newest, mtime)

        return tuple(entries), newest

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        fresh = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            fresh = fresh.filtered(self._filtername)
        else:
            fresh = fresh.unfiltered()
        # Inherit the cached fingerprint so the copy refreshes on the same
        # schedule as the original.
        duplicate = cachedlocalrepo(fresh)
        duplicate._state = self._state
        duplicate.mtime = self.mtime
        return duplicate
General Comments 0
You need to be logged in to leave comments. Login now