##// END OF EJS Templates
static-http: have `statichttprepo.instance` return a peer object...
marmoute -
r50586:c3728734 default
parent child Browse files
Show More
@@ -1,1624 +1,1624 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
62 release = lock.release
62 release = lock.release
63
63
64 # shared features
64 # shared features
65 sharedbookmarks = b'bookmarks'
65 sharedbookmarks = b'bookmarks'
66
66
67
67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names from a parsed URL into concrete revisions.

    ``lrepo`` is a local repository (may be None) used only to resolve
    the special b'.' branch from its dirstate.  ``other`` is a repo or
    peer whose branchmap is queried.  ``branches`` is the
    ``(hashbranch, branches)`` pair produced by URL parsing; ``revs``
    is an optional pre-existing revision list.

    Returns ``(revs, checkout)``: the full revision list and the
    revision to check out (or None).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch component in the URL: pass the revisions through
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # remote cannot enumerate branches; treat the URL fragment as a
        # plain revision identifier instead
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # extend `revs` with the heads of `branch`; return whether the
        # branch exists in the remote branchmap
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # the URL fragment may name a branch or a raw revision; when it
        # is not a known branch, fall back to treating it as a revision
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
110
110
111
111
112 def _isfile(path):
112 def _isfile(path):
113 try:
113 try:
114 # we use os.stat() directly here instead of os.path.isfile()
114 # we use os.stat() directly here instead of os.path.isfile()
115 # because the latter started returning `False` on invalid path
115 # because the latter started returning `False` on invalid path
116 # exceptions starting in 3.8 and we care about handling
116 # exceptions starting in 3.8 and we care about handling
117 # invalid paths specially here.
117 # invalid paths specially here.
118 st = os.stat(path)
118 st = os.stat(path)
119 except ValueError as e:
119 except ValueError as e:
120 msg = stringutil.forcebytestr(e)
120 msg = stringutil.forcebytestr(e)
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
122 except OSError:
122 except OSError:
123 return False
123 return False
124 else:
124 else:
125 return stat.S_ISREG(st.st_mode)
125 return stat.S_ISREG(st.st_mode)
126
126
127
127
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a plain file at the path is a bundle, which is not "local" in
        # the repository sense
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        expanded = util.expandpath(urlutil.urllocalpath(path))
        module = bundlerepo if _isfile(expanded) else localrepo
        return module.instance(ui, expanded, *args, **kwargs)
144
144
145
145
# URL scheme -> module whose `instance()` returns a repository object
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL scheme -> module whose `instance()` returns a peer object
# (static-http lives here since statichttprepo.instance started
# returning a peer)
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
158
158
159
159
def _peerlookup(path):
    """Return the module (or factory) handling ``path``'s URL scheme.

    Peer schemes take precedence over repository schemes; anything
    unrecognized is treated as a local path.
    """
    parsed = urlutil.url(path)
    scheme = parsed.scheme or b'file'
    for mapping in (peer_schemes, repo_schemes):
        if scheme in mapping:
            return mapping[scheme]
    return LocalFactory
168
168
169
169
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # passing a repo object here is deprecated; use its own method
        repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
        return repo.local()
    handler = _peerlookup(repo)
    handler.instance  # make sure we load the module
    if not util.safehasattr(handler, 'islocal'):
        return False
    return handler.islocal(repo)  # pytype: disable=module-attr
180
180
181
181
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
189
189
190
190
# a list of (ui, repo) functions called for wire peer initialization;
# applied to non-local peers by _setup_repo_or_peer()
wirepeersetupfuncs = []
193
193
194
194
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    factory = _peerlookup(path)
    obj = factory.instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    _setup_repo_or_peer(ui, obj, presetupfuncs)
    return obj
203
203
204
204
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run presetup callbacks and extension reposetup hooks on ``obj``.

    ``obj`` may be a repository or a peer; non-local (wire) peers
    additionally get the registered ``wirepeersetupfuncs`` applied.
    """
    # prefer the object's own ui if it has one (it may carry
    # repo-specific configuration)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peer: run the registered peer-initialization callbacks
        for f in wirepeersetupfuncs:
            f(ui, obj)
223
223
224
224
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    local = obj.local()
    if local:
        # hide secret/obsolete changesets from general-purpose callers
        return local.filtered(b'visible')
    raise error.Abort(
        _(b"repository '%s' is not local") % (path or obj.url())
    )
248
248
249
249
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    # apply remote-relevant options (e.g. ssh command) to a copied ui
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
256
256
257
257
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    return os.path.basename(os.path.normpath(path)) if path else b''
278
278
279
279
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share: the store lives inside the repo itself
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        # cached by a previous call below
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    # cache on the instance so subsequent calls are cheap
    repo.srcrepo = srcrepo
    return srcrepo
297
297
298
298
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a local repository object or a path (bytes).
    ``update`` may be a boolean or a revision to check out.  Returns
    the newly created shared repository object.
    '''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen the repository so the configuration written by postshare()
    # is taken into account
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
350
350
351
351
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    # nothing to copy if the share source has no hgrc
    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = (
        dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''
    )

    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
372
372
373
373
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # retire the sharedpath pointer; it is renamed (not deleted)
            # so the old value remains on disk as '.old'
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
420
420
421
421
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # point the new repo's default path at the share source's default
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            # propagate the narrow spec into the new working copy
            narrowspec.copytoworkingcopy(destrepo)
438
438
439
439
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # a specific revision was requested; it overrides `checkout`
        checkout = update
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    # NOTE(review): assumes at least one candidate (b'tip') always
    # resolves, otherwise `uprev` would be unbound here — confirm
    _update(repo, uprev)
460
460
461
461
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        # hardlink is None here, so the initial topic is always
        # 'copying'; util.copyfiles below decides whether links are used
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # a publishing repo's phaseroots are not copied
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # release the destination lock before propagating the failure
        release(destlock)
        raise
500
500
501
501
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    # NOTE(review): the `pull` argument is never consulted in this body;
    # the clone() call below always passes pull=True — confirm intent
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
607
607
608
608
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """Copy the cache file ``fname`` from srcrepo into ``dstcachedir``.

    Creates ``dstcachedir`` on demand; silently does nothing when the
    source cache file does not exist.
    """
    source = srcrepo.cachevfs.join(fname)
    if not os.path.exists(source):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(source, os.path.join(dstcachedir, fname))
618
618
619
619
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = destwlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)

            destwlock = destrepo.wlock()
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)

            # make the peer aware that is it already locked
            #
            # important:
            #
            # We still need to release that lock at the end of the function
            destpeer.local()._lockref = weakref.ref(destlock)
            destpeer.local()._wlockref = weakref.ref(destwlock)
            # dirstate also needs to be copied because `_wlockref` has a reference
            # to it: this dirstate is saved to disk when the wlock is released
            destpeer.local().dirstate = destrepo.dirstate

            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except FileExistsError:
                cleandir = None
                raise error.Abort(_(b"destination '%s' already exists") % dest)

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        heads=revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # translate first, then format, so the catalog
                            # lookup sees the literal message (matches the
                            # sibling branch below)
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                # fixed: this used to release `destlock` a second time,
                # leaving the working-copy lock held until the finally block
                release(destwlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1083
1083
1084
1084
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of merge/update stats to the repo's ui.

    When ``quietempty`` is true and the stats are all zero, print nothing.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1100
1100
1101
1101
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Deprecated: update the working directory to ``node``.

    With ``overwrite`` set, local changes are clobbered; otherwise they
    are merged.  Returns stats (see pydoc mercurial.merge.applyupdates).
    New callers should use merge.update() or merge.clean_update().
    """
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1120
1120
1121
1121
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node``.

    Returns True when unresolved files remain after the update.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved
1129
1129
1130
1130
# naming conflict in clone(): clone() has a parameter named `update`, so it
# calls this module-level function through the `_update` alias instead
_update = update
1133
1133
1134
1134
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, clobbering changes.

    Always returns False: an overwriting update cannot leave unresolved
    files behind.
    """
    stats = mergemod.clean_update(repo[node])
    # a clean (overwrite) update can never produce merge conflicts
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False
1142
1142
1143
1143
# updatetotally() takes a `clean` argument that shadows clean(); keep a
# module-level alias so it stays reachable there.
_clean = clean
1146
1146
# The complete set of accepted values for the `updatecheck` argument /
# `commands.update.check` config knob.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1153
1153
1154
1154
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
              changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit (caller-supplied) value must be valid
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and maybe
            # an active bookmark to drag along)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out early on a dirty tree, then fall through with
                # checking disabled since we just proved it is clean
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is not a bookmark: drop any active one
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1233
1233
1234
1234
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        # merge succeeded: nudge the user toward committing the result
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0
1256
1256
1257
1257
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the first parent."""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts; go back to the local side of the merge
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1271
1271
1272
1272
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        # rebase the subrepo path onto the parent's source URL
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # may replace `other` with a (bundle-backed) local view of the
        # remote changes, and `cleanupfn` with a richer cleanup
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1334
1334
1335
1335
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from `source`."""

    def subreporecurse():
        # recurse into subrepos when requested; 1 means "no changes found"
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1365
1365
1366
1366
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute nodes missing from every destination in `dests`.

    Returns (outgoing_revs, others) where outgoing_revs is the union of
    missing nodes sorted by local revision number and others is the list
    of successfully opened peers (callers must close them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # rebase the subrepo path onto the parent's destination URL
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1409
1409
1410
1410
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on each subrepo; return the minimum exit code seen."""
    ret = 1
    if opts.get(b'subrepos'):
        ctx = repo[None]
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            ret = min(ret, sub.outgoing(ui, dests, opts))
    return ret
1419
1419
1420
1420
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: stream the nodes straight through
        for r in revs:
            yield r
        return

    shown = 0
    cl = repo.changelog
    for n in revs:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in cl.parents(n) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        shown += 1
        yield n
1442
1442
1443
1443
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the specified destination(s)."""
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # peers opened by _outgoing are our responsibility to close
        for oth in others:
            oth.close()
1475
1475
1476
1476
def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # a failing subrepo verify taints the overall result
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # unreadable .hgsubstate: warn and continue with other revs
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1509
1509
1510
1510
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options
    for o in b'ssh', b'remotecmd':
        v = opts.get(o) or src.config(b'ui', o)
        if v:
            dst.setconfig(b"ui", o, v, b'copied')

    # copy bundle-specific options
    r = src.config(b'bundle', b'mainreporoot')
    if r:
        dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    v = src.config(b'web', b'cacerts')
    if v:
        dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')

    return dst
1539
1539
1540
1540
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1550
1550
1551
1551
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        # Snapshot (mtime, size) for every file of interest; fall back to
        # the containing directory when a file does not exist.
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the freshness snapshot so the copy does not spuriously refresh
        c._state = self._state
        c.mtime = self.mtime
        return c
@@ -1,265 +1,265 b''
1 # statichttprepo.py - simple http repository class for mercurial
1 # statichttprepo.py - simple http repository class for mercurial
2 #
2 #
3 # This provides read-only repo access to repositories exported via static http
3 # This provides read-only repo access to repositories exported via static http
4 #
4 #
5 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10
10
11 import errno
11 import errno
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import sha1nodeconstants
14 from .node import sha1nodeconstants
15 from . import (
15 from . import (
16 branchmap,
16 branchmap,
17 changelog,
17 changelog,
18 error,
18 error,
19 localrepo,
19 localrepo,
20 manifest,
20 manifest,
21 namespaces,
21 namespaces,
22 pathutil,
22 pathutil,
23 pycompat,
23 pycompat,
24 requirements as requirementsmod,
24 requirements as requirementsmod,
25 url,
25 url,
26 util,
26 util,
27 vfs as vfsmod,
27 vfs as vfsmod,
28 )
28 )
29 from .utils import (
29 from .utils import (
30 urlutil,
30 urlutil,
31 )
31 )
32
32
# Compatibility aliases for the urllib error/request shims exposed by util.
urlerr = util.urlerr
urlreq = util.urlreq
35
35
36
36
class httprangereader:
    """Read-only file-like object backed by HTTP Range requests.

    The supplied *opener* is assumed to have an HTTPRangeHandler installed
    so that ``206 Partial Content`` responses are accepted; servers without
    Range support return the full entity, which read() slices locally.
    """

    def __init__(self, url, opener):
        # we assume opener has HTTPRangeHandler
        self.url = url
        self.pos = 0  # current read offset into the remote file
        self.opener = opener
        self.name = url

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def seek(self, pos):
        """Set the absolute offset used by the next read() (no whence)."""
        self.pos = pos

    def read(self, bytes=None):
        """Read up to *bytes* bytes starting at the current offset.

        Raises IOError on HTTP/URL errors; a 404 is mapped to ENOENT so
        callers can treat it like a missing file.
        """
        req = urlreq.request(pycompat.strurl(self.url))
        end = b''
        if bytes:
            end = self.pos + bytes - 1
        if self.pos or end:
            req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))

        try:
            f = self.opener.open(req)
            data = f.read()
            code = f.code
        except urlerr.httperror as inst:
            # Prefer an explicit conditional over the fragile
            # ``cond and a or b`` idiom.
            num = errno.ENOENT if inst.code == 404 else None
            # Explicitly convert the exception to str as Py3 will try
            # convert it to local encoding and with as the HTTPResponse
            # instance doesn't support encode.
            raise IOError(num, str(inst))
        except urlerr.urlerror as inst:
            raise IOError(None, inst.reason)

        if code == 200:
            # HTTPRangeHandler does nothing if remote does not support
            # Range headers and returns the full entity. Let's slice it.
            if bytes:
                data = data[self.pos : self.pos + bytes]
            else:
                data = data[self.pos :]
        elif bytes:
            data = data[:bytes]
        self.pos += len(data)
        return data

    def readlines(self):
        return self.read().splitlines(True)

    def __iter__(self):
        return iter(self.readlines())

    def close(self):
        # Nothing to release: each read() opens and consumes its own request.
        pass
95
95
96
96
97 # _RangeError and _HTTPRangeHandler were originally in byterange.py,
97 # _RangeError and _HTTPRangeHandler were originally in byterange.py,
98 # which was itself extracted from urlgrabber. See the last version of
98 # which was itself extracted from urlgrabber. See the last version of
99 # byterange.py from history if you need more information.
99 # byterange.py from history if you need more information.
100 class _RangeError(IOError):
100 class _RangeError(IOError):
101 """Error raised when an unsatisfiable range is requested."""
101 """Error raised when an unsatisfiable range is requested."""
102
102
103
103
class _HTTPRangeHandler(urlreq.basehandler):
    """Handler that enables HTTP Range headers.

    This was extremely simple. The Range header is a HTTP feature to
    begin with so all this class does is tell urllib2 that the
    "206 Partial Content" response from the HTTP server is what we
    expected.
    """

    def http_error_206(self, req, fp, code, msg, hdrs):
        # 206 Partial Content Response: wrap it up as a normal response.
        resp = urlreq.addinfourl(fp, hdrs, req.get_full_url())
        resp.msg = msg
        resp.code = code
        return resp

    def http_error_416(self, req, fp, code, msg, hdrs):
        # HTTP's Range Not Satisfiable error
        raise _RangeError(b'Requested Range Not Satisfiable')
123
123
124
124
def build_opener(ui, authinfo):
    """Return a vfs *class* whose instances read paths over HTTP ranges."""
    # urllib cannot handle URLs with embedded user or passwd
    urlopener = url.opener(ui, authinfo)
    urlopener.add_handler(_HTTPRangeHandler())

    class statichttpvfs(vfsmod.abstractvfs):
        def __init__(self, base):
            self.base = base
            self.options = {}

        def __call__(self, path, mode=b'r', *args, **kw):
            # Remote content is strictly read-only.
            if mode not in (b'r', b'rb'):
                raise IOError(b'Permission denied')
            remote = b"/".join((self.base, urlreq.quote(path)))
            return httprangereader(remote, urlopener)

        def join(self, path):
            if not path:
                return self.base
            return pathutil.join(self.base, path)

    return statichttpvfs
148
148
149
149
class statichttppeer(localrepo.localpeer):
    """Peer wrapper for a static-http repository: never local, never pushable."""

    def local(self):
        return None

    def canpush(self):
        return False
156
156
157
157
class statichttprepository(
    localrepo.localrepository, localrepo.revlogfilestorage
):
    """Read-only repository served over plain (static) HTTP.

    Re-implements enough of localrepository's initialization to support
    read operations; all writes and locking are refused.
    """

    supported = localrepo.localrepository._basesupported

    def __init__(self, ui, path):
        self._url = path
        self.ui = ui

        self.root = path
        # Point directly at the remote control directory.
        u = urlutil.url(path.rstrip(b'/') + b"/.hg")
        self.path, authinfo = u.authinfo()

        # vfsclass instances fetch files via HTTP Range requests.
        vfsclass = build_opener(ui, authinfo)
        self.vfs = vfsclass(self.path)
        self.cachevfs = vfsclass(self.vfs.join(b'cache'))
        self._phasedefaults = []

        self.names = namespaces.namespaces()
        self.filtername = None
        self._extrafilterid = None
        self._wanted_sidedata = set()
        self.features = set()

        try:
            requirements = set(self.vfs.read(b'requires').splitlines())
        except FileNotFoundError:
            requirements = set()

        # check if it is a non-empty old-style repository
        try:
            fp = self.vfs(b"00changelog.i")
            fp.read(1)
            fp.close()
        except FileNotFoundError:
            # we do not care about empty old-style repositories here
            msg = _(b"'%s' does not appear to be an hg repository") % path
            raise error.RepoError(msg)
        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            # share-safe repos keep additional requirements under store/
            storevfs = vfsclass(self.vfs.join(b'store'))
            requirements |= set(storevfs.read(b'requires').splitlines())

        supportedrequirements = localrepo.gathersupportedrequirements(ui)
        localrepo.ensurerequirementsrecognized(
            requirements, supportedrequirements
        )
        localrepo.ensurerequirementscompatible(ui, requirements)
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid

        # setup store
        self.store = localrepo.makestore(requirements, self.path, vfsclass)
        self.spath = self.store.path
        self.svfs = self.store.opener
        self.sjoin = self.store.join
        self._filecache = {}
        self.requirements = requirements

        rootmanifest = manifest.manifestrevlog(self.nodeconstants, self.svfs)
        self.manifestlog = manifest.manifestlog(
            self.svfs, self, rootmanifest, self.narrowmatch()
        )
        self.changelog = changelog.changelog(self.svfs)
        # Lazily-populated caches, mirroring localrepository's attributes.
        self._tags = None
        self.nodetagscache = None
        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self.encodepats = None
        self.decodepats = None
        self._transref = None

    def _restrictcapabilities(self, caps):
        # A static repository cannot service pushkey requests.
        caps = super(statichttprepository, self)._restrictcapabilities(caps)
        return caps.difference([b"pushkey"])

    def url(self):
        return self._url

    def local(self):
        return False

    def peer(self):
        return statichttppeer(self)

    def wlock(self, wait=True):
        # Read-only repository: locking can never succeed.
        raise error.LockUnavailable(
            0,
            _(b'lock not available'),
            b'lock',
            _(b'cannot lock static-http repository'),
        )

    def lock(self, wait=True):
        raise error.LockUnavailable(
            0,
            _(b'lock not available'),
            b'lock',
            _(b'cannot lock static-http repository'),
        )

    def _writecaches(self):
        pass  # statichttprepository are read only
260
260
261
261
def instance(ui, path, create, intents=None, createopts=None):
    """Return a peer for the static-http repository at *path*.

    The first 7 bytes of *path* (presumably the b'static-' scheme prefix —
    confirm against callers) are stripped before building the repository.
    Creation is not supported and aborts.
    """
    if create:
        raise error.Abort(_(b'cannot create new static-http repository'))
    repo = statichttprepository(ui, path[7:])
    return repo.peer()
General Comments 0
You need to be logged in to leave comments. Login now