##// END OF EJS Templates
clone: explicitly detect the need to fetch a peer...
marmoute -
r50642:8a38cd76 default
parent child Browse files
Show More
@@ -1,1664 +1,1669 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
# convenience alias: callers may release locks via hg.release()
release = lock.release

# shared features
sharedbookmarks = b'bookmarks'
def addbranchrevs(lrepo, other, branches, revs):
    """Expand branch names into revisions for a pull/clone/share operation.

    ``lrepo`` is the local repository (used only to resolve the b'.'
    branch via its dirstate), ``other`` is a peer or local repo for the
    remote side, ``branches`` is a ``(hashbranch, branches)`` pair, and
    ``revs`` is the explicitly requested revision list.

    Returns a ``(revs, checkout)`` pair: the revisions to fetch and the
    revision to check out afterwards (either may be ``None``).

    Raises ``error.Abort`` when the remote cannot look up branches and
    ``error.RepoLookupError`` for an unknown branch name.
    """
    if util.safehasattr(other, 'peer'):
        # a courtesy to callers using a localrepo for other
        peer = other.peer()
    else:
        peer = other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branches requested: pass the revisions through unchanged
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        # treat the branch fragment as a plain revision identifier
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # expand one branch name into its heads (appended to ``revs``);
        # returns False when the branch is unknown to the remote
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a branch name: assume it is a revision identifier
            revs.append(hashbranch)
    return revs, revs[0]
114
114
115
115
def _isfile(path):
    """Return True when ``path`` exists and is a regular file.

    ``os.stat()`` is used directly instead of ``os.path.isfile()``
    because the latter started returning ``False`` on invalid-path
    exceptions starting in Python 3.8, and invalid paths need dedicated
    handling here.
    """
    try:
        st = os.stat(path)
    except ValueError as e:
        # e.g. a path containing an embedded NUL byte
        msg = stringutil.forcebytestr(e)
        raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
    except OSError:
        return False
    return stat.S_ISREG(st.st_mode)
130
130
131
131
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a plain file at the path means a bundle, which is not "local"
        path = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(path)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        # dispatch: bundle files go to bundlerepo, everything else to
        # localrepo; both expose a compatible ``instance()``
        path = util.expandpath(urlutil.urllocalpath(path))
        if _isfile(path):
            cls = bundlerepo
        else:
            cls = localrepo
        return cls.instance(ui, path, *args, **kwargs)
148
148
149
149
# URL scheme -> module/factory providing a local repository class
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL scheme -> module providing a remote peer class
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
162
162
163
163
def _peerlookup(path):
    """Return the handler module/factory for the scheme of ``path``.

    Peer schemes take precedence over repository schemes; anything
    unrecognized falls back to ``LocalFactory``.
    """
    parsed = urlutil.url(path)
    scheme = parsed.scheme or b'file'
    handler = peer_schemes.get(scheme)
    if handler is None:
        handler = repo_schemes.get(scheme)
    if handler is None:
        handler = LocalFactory
    return handler
172
172
173
173
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        # a raw path: dispatch on its URL scheme
        cls = _peerlookup(repo)
        cls.instance  # make sure we load the module
        if util.safehasattr(cls, 'islocal'):
            return cls.islocal(repo)  # pytype: disable=module-attr
        return False
    # passing a repo/peer object here is deprecated; use obj.local()
    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
    return repo.local()
184
184
185
185
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        # remote: delegate to the url module (honors proxies, auth, ...)
        return url.open(ui, path, sendaccept=sendaccept)
    # local: plain binary file handle
    return util.posixfile(pathurl.localpath(), b'rb')
193
193
194
194
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
197
197
198
198
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run setup hooks on a freshly created repo or peer object.

    Calls ``presetupfuncs`` first, then every loaded extension's
    ``reposetup`` hook (timed and logged), and finally — for non-local
    objects only — the registered ``wirepeersetupfuncs``.
    """
    # prefer the object's own ui when it has one
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get their own initialization pass
        for f in wirepeersetupfuncs:
            f(ui, obj)
217
217
218
218
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path

    Raises ``error.Abort`` when the path's scheme designates a remote
    peer rather than a local repository.
    """
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    cls = repo_schemes.get(scheme)
    if cls is None:
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        # unknown scheme: treat as a local path
        cls = LocalFactory
    repo = cls.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    # hand out the "visible" filtered view, hiding obsolete/secret-hidden
    # changesets from normal callers
    return repo.filtered(b'visible')
245
245
246
246
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path

    ``path`` may be a raw bytes path/URL or a ``urlutil.path`` object.
    A local path is opened as a repository and wrapped via ``repo.peer()``.
    '''
    rui = remoteui(uiorrepo, opts)
    if util.safehasattr(path, 'url'):
        # this is a urlutil.path object
        scheme = path.url.scheme  # pytype: disable=attribute-error
        # XXX for now we don't do anything more than that
        path = path.loc  # pytype: disable=attribute-error
    else:
        scheme = urlutil.url(path).scheme
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
        peer = cls.instance(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo = repository(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        peer = repo.peer()
    return peer
278
278
279
279
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if not path:
        return b''
    # normpath drops any trailing slash so basename yields the last
    # meaningful path component
    return os.path.basename(os.path.normpath(path))
300
300
301
301
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.

    The resolved source repository is cached on ``repo.srcrepo`` so
    repeated calls do not reopen it.
    """
    if repo.sharedpath == repo.path:
        # not a share: store lives inside this repo
        return None

    # attribute names must be str, not bytes: every other safehasattr
    # call in this module uses a str name ('peer', 'url', 'local'), and
    # the original b'srcrepo' here only worked via the pycompat getattr
    # shim
    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    # cache for subsequent lookups
    repo.srcrepo = srcrepo
    return srcrepo
319
319
320
320
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a raw bytes path or a repo/peer object; it must be
    local either way. ``dest`` defaults to ``defaultdest(source)``.
    Returns the newly created shared repository object.
    '''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        # resolve the path/URL and any #branch fragment it carries
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen to pick up state written by postshare (e.g. the new hgrc)
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
373
373
374
374
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # nothing to copy from the share source
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    # source config goes first so the repo's own config (appended after)
    # keeps precedence when both define the same setting
    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
395
395
396
396
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around as a backup rather than deleting it
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make stale references to the old repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
443
443
444
444
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    # point the share's default path at the source (or an explicit override)
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow clones also need the narrowspec mirrored into the
        # working copy of the new share
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
461
461
462
462
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # an explicit revision was passed; it overrides ``checkout``
        checkout = update
    # try the requested checkout first, then fall back to default/tip
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    # NOTE(review): if every lookup above fails, ``uprev`` is unbound and
    # the call below raises NameError; presumably b'tip' always resolves —
    # confirm
    _update(repo, uprev)
483
483
484
484
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock

    The returned lock (or None) guards the destination while its store is
    incomplete; the caller is responsible for releasing it. On any error
    the lock is released here before re-raising.
    """
    destlock = None
    try:
        hardlink = None
        # hardlink is still None at this point, so the topic is always
        # b'copying'; util.copyfiles determines later whether links work
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # skip phase data when the source is publishing
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    # copyfiles reports back whether hardlinking succeeded;
                    # that result is reused for the following files
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
523
523
524
524
525 def clonewithshare(
525 def clonewithshare(
526 ui,
526 ui,
527 peeropts,
527 peeropts,
528 sharepath,
528 sharepath,
529 source,
529 source,
530 srcpeer,
530 srcpeer,
531 dest,
531 dest,
532 pull=False,
532 pull=False,
533 rev=None,
533 rev=None,
534 update=True,
534 update=True,
535 stream=False,
535 stream=False,
536 ):
536 ):
537 """Perform a clone using a shared repo.
537 """Perform a clone using a shared repo.
538
538
539 The store for the repository will be located at <sharepath>/.hg. The
539 The store for the repository will be located at <sharepath>/.hg. The
540 specified revisions will be cloned or pulled from "source". A shared repo
540 specified revisions will be cloned or pulled from "source". A shared repo
541 will be created at "dest" and a working copy will be created if "update" is
541 will be created at "dest" and a working copy will be created if "update" is
542 True.
542 True.
543 """
543 """
544 revs = None
544 revs = None
545 if rev:
545 if rev:
546 if not srcpeer.capable(b'lookup'):
546 if not srcpeer.capable(b'lookup'):
547 raise error.Abort(
547 raise error.Abort(
548 _(
548 _(
549 b"src repository does not support "
549 b"src repository does not support "
550 b"revision lookup and so doesn't "
550 b"revision lookup and so doesn't "
551 b"support clone by revision"
551 b"support clone by revision"
552 )
552 )
553 )
553 )
554
554
555 # TODO this is batchable.
555 # TODO this is batchable.
556 remoterevs = []
556 remoterevs = []
557 for r in rev:
557 for r in rev:
558 with srcpeer.commandexecutor() as e:
558 with srcpeer.commandexecutor() as e:
559 remoterevs.append(
559 remoterevs.append(
560 e.callcommand(
560 e.callcommand(
561 b'lookup',
561 b'lookup',
562 {
562 {
563 b'key': r,
563 b'key': r,
564 },
564 },
565 ).result()
565 ).result()
566 )
566 )
567 revs = remoterevs
567 revs = remoterevs
568
568
569 # Obtain a lock before checking for or cloning the pooled repo otherwise
569 # Obtain a lock before checking for or cloning the pooled repo otherwise
570 # 2 clients may race creating or populating it.
570 # 2 clients may race creating or populating it.
571 pooldir = os.path.dirname(sharepath)
571 pooldir = os.path.dirname(sharepath)
572 # lock class requires the directory to exist.
572 # lock class requires the directory to exist.
573 try:
573 try:
574 util.makedir(pooldir, False)
574 util.makedir(pooldir, False)
575 except FileExistsError:
575 except FileExistsError:
576 pass
576 pass
577
577
578 poolvfs = vfsmod.vfs(pooldir)
578 poolvfs = vfsmod.vfs(pooldir)
579 basename = os.path.basename(sharepath)
579 basename = os.path.basename(sharepath)
580
580
581 with lock.lock(poolvfs, b'%s.lock' % basename):
581 with lock.lock(poolvfs, b'%s.lock' % basename):
582 if os.path.exists(sharepath):
582 if os.path.exists(sharepath):
583 ui.status(
583 ui.status(
584 _(b'(sharing from existing pooled repository %s)\n') % basename
584 _(b'(sharing from existing pooled repository %s)\n') % basename
585 )
585 )
586 else:
586 else:
587 ui.status(
587 ui.status(
588 _(b'(sharing from new pooled repository %s)\n') % basename
588 _(b'(sharing from new pooled repository %s)\n') % basename
589 )
589 )
590 # Always use pull mode because hardlinks in share mode don't work
590 # Always use pull mode because hardlinks in share mode don't work
591 # well. Never update because working copies aren't necessary in
591 # well. Never update because working copies aren't necessary in
592 # share mode.
592 # share mode.
593 clone(
593 clone(
594 ui,
594 ui,
595 peeropts,
595 peeropts,
596 source,
596 source,
597 dest=sharepath,
597 dest=sharepath,
598 pull=True,
598 pull=True,
599 revs=rev,
599 revs=rev,
600 update=False,
600 update=False,
601 stream=stream,
601 stream=stream,
602 )
602 )
603
603
604 # Resolve the value to put in [paths] section for the source.
604 # Resolve the value to put in [paths] section for the source.
605 if islocal(source):
605 if islocal(source):
606 defaultpath = util.abspath(urlutil.urllocalpath(source))
606 defaultpath = util.abspath(urlutil.urllocalpath(source))
607 else:
607 else:
608 defaultpath = source
608 defaultpath = source
609
609
610 sharerepo = repository(ui, path=sharepath)
610 sharerepo = repository(ui, path=sharepath)
611 destrepo = share(
611 destrepo = share(
612 ui,
612 ui,
613 sharerepo,
613 sharerepo,
614 dest=dest,
614 dest=dest,
615 update=False,
615 update=False,
616 bookmarks=False,
616 bookmarks=False,
617 defaultpath=defaultpath,
617 defaultpath=defaultpath,
618 )
618 )
619
619
620 # We need to perform a pull against the dest repo to fetch bookmarks
620 # We need to perform a pull against the dest repo to fetch bookmarks
621 # and other non-store data that isn't shared by default. In the case of
621 # and other non-store data that isn't shared by default. In the case of
622 # non-existing shared repo, this means we pull from the remote twice. This
622 # non-existing shared repo, this means we pull from the remote twice. This
623 # is a bit weird. But at the time it was implemented, there wasn't an easy
623 # is a bit weird. But at the time it was implemented, there wasn't an easy
624 # way to pull just non-changegroup data.
624 # way to pull just non-changegroup data.
625 exchange.pull(destrepo, srcpeer, heads=revs)
625 exchange.pull(destrepo, srcpeer, heads=revs)
626
626
627 _postshareupdate(destrepo, update)
627 _postshareupdate(destrepo, update)
628
628
629 return srcpeer, peer(ui, peeropts, dest)
629 return srcpeer, peer(ui, peeropts, dest)
630
630
631
631
632 # Recomputing caches is often slow on big repos, so copy them.
632 # Recomputing caches is often slow on big repos, so copy them.
633 def _copycache(srcrepo, dstcachedir, fname):
633 def _copycache(srcrepo, dstcachedir, fname):
634 """copy a cache from srcrepo to destcachedir (if it exists)"""
634 """copy a cache from srcrepo to destcachedir (if it exists)"""
635 srcfname = srcrepo.cachevfs.join(fname)
635 srcfname = srcrepo.cachevfs.join(fname)
636 dstfname = os.path.join(dstcachedir, fname)
636 dstfname = os.path.join(dstcachedir, fname)
637 if os.path.exists(srcfname):
637 if os.path.exists(srcfname):
638 if not os.path.exists(dstcachedir):
638 if not os.path.exists(dstcachedir):
639 os.mkdir(dstcachedir)
639 os.mkdir(dstcachedir)
640 util.copyfile(srcfname, dstfname)
640 util.copyfile(srcfname, dstfname)
641
641
642
642
643 def clone(
643 def clone(
644 ui,
644 ui,
645 peeropts,
645 peeropts,
646 source,
646 source,
647 dest=None,
647 dest=None,
648 pull=False,
648 pull=False,
649 revs=None,
649 revs=None,
650 update=True,
650 update=True,
651 stream=False,
651 stream=False,
652 branch=None,
652 branch=None,
653 shareopts=None,
653 shareopts=None,
654 storeincludepats=None,
654 storeincludepats=None,
655 storeexcludepats=None,
655 storeexcludepats=None,
656 depth=None,
656 depth=None,
657 ):
657 ):
658 """Make a copy of an existing repository.
658 """Make a copy of an existing repository.
659
659
660 Create a copy of an existing repository in a new directory. The
660 Create a copy of an existing repository in a new directory. The
661 source and destination are URLs, as passed to the repository
661 source and destination are URLs, as passed to the repository
662 function. Returns a pair of repository peers, the source and
662 function. Returns a pair of repository peers, the source and
663 newly created destination.
663 newly created destination.
664
664
665 The location of the source is added to the new repository's
665 The location of the source is added to the new repository's
666 .hg/hgrc file, as the default to be used for future pulls and
666 .hg/hgrc file, as the default to be used for future pulls and
667 pushes.
667 pushes.
668
668
669 If an exception is raised, the partly cloned/updated destination
669 If an exception is raised, the partly cloned/updated destination
670 repository will be deleted.
670 repository will be deleted.
671
671
672 Arguments:
672 Arguments:
673
673
674 source: repository object or URL
674 source: repository object or URL
675
675
676 dest: URL of destination repository to create (defaults to base
676 dest: URL of destination repository to create (defaults to base
677 name of source repository)
677 name of source repository)
678
678
679 pull: always pull from source repository, even in local case or if the
679 pull: always pull from source repository, even in local case or if the
680 server prefers streaming
680 server prefers streaming
681
681
682 stream: stream raw data uncompressed from repository (fast over
682 stream: stream raw data uncompressed from repository (fast over
683 LAN, slow over WAN)
683 LAN, slow over WAN)
684
684
685 revs: revision to clone up to (implies pull=True)
685 revs: revision to clone up to (implies pull=True)
686
686
687 update: update working directory after clone completes, if
687 update: update working directory after clone completes, if
688 destination is local repository (True means update to default rev,
688 destination is local repository (True means update to default rev,
689 anything else is treated as a revision)
689 anything else is treated as a revision)
690
690
691 branch: branches to clone
691 branch: branches to clone
692
692
693 shareopts: dict of options to control auto sharing behavior. The "pool" key
693 shareopts: dict of options to control auto sharing behavior. The "pool" key
694 activates auto sharing mode and defines the directory for stores. The
694 activates auto sharing mode and defines the directory for stores. The
695 "mode" key determines how to construct the directory name of the shared
695 "mode" key determines how to construct the directory name of the shared
696 repository. "identity" means the name is derived from the node of the first
696 repository. "identity" means the name is derived from the node of the first
697 changeset in the repository. "remote" means the name is derived from the
697 changeset in the repository. "remote" means the name is derived from the
698 remote's path/URL. Defaults to "identity."
698 remote's path/URL. Defaults to "identity."
699
699
700 storeincludepats and storeexcludepats: sets of file patterns to include and
700 storeincludepats and storeexcludepats: sets of file patterns to include and
701 exclude in the repository copy, respectively. If not defined, all files
701 exclude in the repository copy, respectively. If not defined, all files
702 will be included (a "full" clone). Otherwise a "narrow" clone containing
702 will be included (a "full" clone). Otherwise a "narrow" clone containing
703 only the requested files will be performed. If ``storeincludepats`` is not
703 only the requested files will be performed. If ``storeincludepats`` is not
704 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
704 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
705 ``path:.``. If both are empty sets, no files will be cloned.
705 ``path:.``. If both are empty sets, no files will be cloned.
706 """
706 """
707
707
708 if isinstance(source, bytes):
708 if isinstance(source, bytes):
709 src_path = urlutil.get_clone_path_obj(ui, source)
709 src_path = urlutil.get_clone_path_obj(ui, source)
710 if src_path is None:
710 if src_path is None:
711 srcpeer = peer(ui, peeropts, b'')
711 srcpeer = peer(ui, peeropts, b'')
712 origsource = source = b''
712 origsource = source = b''
713 branches = (None, branch or [])
713 branches = (None, branch or [])
714 else:
714 else:
715 srcpeer = peer(ui, peeropts, src_path)
715 srcpeer = peer(ui, peeropts, src_path)
716 origsource = src_path.rawloc
716 origsource = src_path.rawloc
717 branches = (src_path.branch, branch or [])
717 branches = (src_path.branch, branch or [])
718 source = src_path.loc
718 source = src_path.loc
719 else:
719 else:
720 if util.safehasattr(source, 'peer'):
721 srcpeer = source.peer() # in case we were called with a localrepo
722 else:
723 srcpeer = source
724 branches = (None, branch or [])
720 # XXX path: simply use the peer `path` object when this become available
725 # XXX path: simply use the peer `path` object when this become available
721 srcpeer = source.peer() # in case we were called with a localrepo
726 srcpeer = source.peer() # in case we were called with a localrepo
722 branches = (None, branch or [])
727 branches = (None, branch or [])
723 origsource = source = srcpeer.url()
728 origsource = source = srcpeer.url()
724 srclock = destlock = destwlock = cleandir = None
729 srclock = destlock = destwlock = cleandir = None
725 destpeer = None
730 destpeer = None
726 try:
731 try:
727 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
732 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
728
733
729 if dest is None:
734 if dest is None:
730 dest = defaultdest(source)
735 dest = defaultdest(source)
731 if dest:
736 if dest:
732 ui.status(_(b"destination directory: %s\n") % dest)
737 ui.status(_(b"destination directory: %s\n") % dest)
733 else:
738 else:
734 dest_path = urlutil.get_clone_path_obj(ui, dest)
739 dest_path = urlutil.get_clone_path_obj(ui, dest)
735 if dest_path is not None:
740 if dest_path is not None:
736 dest = dest_path.rawloc
741 dest = dest_path.rawloc
737 else:
742 else:
738 dest = b''
743 dest = b''
739
744
740 dest = urlutil.urllocalpath(dest)
745 dest = urlutil.urllocalpath(dest)
741 source = urlutil.urllocalpath(source)
746 source = urlutil.urllocalpath(source)
742
747
743 if not dest:
748 if not dest:
744 raise error.InputError(_(b"empty destination path is not valid"))
749 raise error.InputError(_(b"empty destination path is not valid"))
745
750
746 destvfs = vfsmod.vfs(dest, expandpath=True)
751 destvfs = vfsmod.vfs(dest, expandpath=True)
747 if destvfs.lexists():
752 if destvfs.lexists():
748 if not destvfs.isdir():
753 if not destvfs.isdir():
749 raise error.InputError(
754 raise error.InputError(
750 _(b"destination '%s' already exists") % dest
755 _(b"destination '%s' already exists") % dest
751 )
756 )
752 elif destvfs.listdir():
757 elif destvfs.listdir():
753 raise error.InputError(
758 raise error.InputError(
754 _(b"destination '%s' is not empty") % dest
759 _(b"destination '%s' is not empty") % dest
755 )
760 )
756
761
757 createopts = {}
762 createopts = {}
758 narrow = False
763 narrow = False
759
764
760 if storeincludepats is not None:
765 if storeincludepats is not None:
761 narrowspec.validatepatterns(storeincludepats)
766 narrowspec.validatepatterns(storeincludepats)
762 narrow = True
767 narrow = True
763
768
764 if storeexcludepats is not None:
769 if storeexcludepats is not None:
765 narrowspec.validatepatterns(storeexcludepats)
770 narrowspec.validatepatterns(storeexcludepats)
766 narrow = True
771 narrow = True
767
772
768 if narrow:
773 if narrow:
769 # Include everything by default if only exclusion patterns defined.
774 # Include everything by default if only exclusion patterns defined.
770 if storeexcludepats and not storeincludepats:
775 if storeexcludepats and not storeincludepats:
771 storeincludepats = {b'path:.'}
776 storeincludepats = {b'path:.'}
772
777
773 createopts[b'narrowfiles'] = True
778 createopts[b'narrowfiles'] = True
774
779
775 if depth:
780 if depth:
776 createopts[b'shallowfilestore'] = True
781 createopts[b'shallowfilestore'] = True
777
782
778 if srcpeer.capable(b'lfs-serve'):
783 if srcpeer.capable(b'lfs-serve'):
779 # Repository creation honors the config if it disabled the extension, so
784 # Repository creation honors the config if it disabled the extension, so
780 # we can't just announce that lfs will be enabled. This check avoids
785 # we can't just announce that lfs will be enabled. This check avoids
781 # saying that lfs will be enabled, and then saying it's an unknown
786 # saying that lfs will be enabled, and then saying it's an unknown
782 # feature. The lfs creation option is set in either case so that a
787 # feature. The lfs creation option is set in either case so that a
783 # requirement is added. If the extension is explicitly disabled but the
788 # requirement is added. If the extension is explicitly disabled but the
784 # requirement is set, the clone aborts early, before transferring any
789 # requirement is set, the clone aborts early, before transferring any
785 # data.
790 # data.
786 createopts[b'lfs'] = True
791 createopts[b'lfs'] = True
787
792
788 if extensions.disabled_help(b'lfs'):
793 if extensions.disabled_help(b'lfs'):
789 ui.status(
794 ui.status(
790 _(
795 _(
791 b'(remote is using large file support (lfs), but it is '
796 b'(remote is using large file support (lfs), but it is '
792 b'explicitly disabled in the local configuration)\n'
797 b'explicitly disabled in the local configuration)\n'
793 )
798 )
794 )
799 )
795 else:
800 else:
796 ui.status(
801 ui.status(
797 _(
802 _(
798 b'(remote is using large file support (lfs); lfs will '
803 b'(remote is using large file support (lfs); lfs will '
799 b'be enabled for this repository)\n'
804 b'be enabled for this repository)\n'
800 )
805 )
801 )
806 )
802
807
803 shareopts = shareopts or {}
808 shareopts = shareopts or {}
804 sharepool = shareopts.get(b'pool')
809 sharepool = shareopts.get(b'pool')
805 sharenamemode = shareopts.get(b'mode')
810 sharenamemode = shareopts.get(b'mode')
806 if sharepool and islocal(dest):
811 if sharepool and islocal(dest):
807 sharepath = None
812 sharepath = None
808 if sharenamemode == b'identity':
813 if sharenamemode == b'identity':
809 # Resolve the name from the initial changeset in the remote
814 # Resolve the name from the initial changeset in the remote
810 # repository. This returns nullid when the remote is empty. It
815 # repository. This returns nullid when the remote is empty. It
811 # raises RepoLookupError if revision 0 is filtered or otherwise
816 # raises RepoLookupError if revision 0 is filtered or otherwise
812 # not available. If we fail to resolve, sharing is not enabled.
817 # not available. If we fail to resolve, sharing is not enabled.
813 try:
818 try:
814 with srcpeer.commandexecutor() as e:
819 with srcpeer.commandexecutor() as e:
815 rootnode = e.callcommand(
820 rootnode = e.callcommand(
816 b'lookup',
821 b'lookup',
817 {
822 {
818 b'key': b'0',
823 b'key': b'0',
819 },
824 },
820 ).result()
825 ).result()
821
826
822 if rootnode != sha1nodeconstants.nullid:
827 if rootnode != sha1nodeconstants.nullid:
823 sharepath = os.path.join(sharepool, hex(rootnode))
828 sharepath = os.path.join(sharepool, hex(rootnode))
824 else:
829 else:
825 ui.status(
830 ui.status(
826 _(
831 _(
827 b'(not using pooled storage: '
832 b'(not using pooled storage: '
828 b'remote appears to be empty)\n'
833 b'remote appears to be empty)\n'
829 )
834 )
830 )
835 )
831 except error.RepoLookupError:
836 except error.RepoLookupError:
832 ui.status(
837 ui.status(
833 _(
838 _(
834 b'(not using pooled storage: '
839 b'(not using pooled storage: '
835 b'unable to resolve identity of remote)\n'
840 b'unable to resolve identity of remote)\n'
836 )
841 )
837 )
842 )
838 elif sharenamemode == b'remote':
843 elif sharenamemode == b'remote':
839 sharepath = os.path.join(
844 sharepath = os.path.join(
840 sharepool, hex(hashutil.sha1(source).digest())
845 sharepool, hex(hashutil.sha1(source).digest())
841 )
846 )
842 else:
847 else:
843 raise error.Abort(
848 raise error.Abort(
844 _(b'unknown share naming mode: %s') % sharenamemode
849 _(b'unknown share naming mode: %s') % sharenamemode
845 )
850 )
846
851
847 # TODO this is a somewhat arbitrary restriction.
852 # TODO this is a somewhat arbitrary restriction.
848 if narrow:
853 if narrow:
849 ui.status(
854 ui.status(
850 _(b'(pooled storage not supported for narrow clones)\n')
855 _(b'(pooled storage not supported for narrow clones)\n')
851 )
856 )
852 sharepath = None
857 sharepath = None
853
858
854 if sharepath:
859 if sharepath:
855 return clonewithshare(
860 return clonewithshare(
856 ui,
861 ui,
857 peeropts,
862 peeropts,
858 sharepath,
863 sharepath,
859 source,
864 source,
860 srcpeer,
865 srcpeer,
861 dest,
866 dest,
862 pull=pull,
867 pull=pull,
863 rev=revs,
868 rev=revs,
864 update=update,
869 update=update,
865 stream=stream,
870 stream=stream,
866 )
871 )
867
872
868 srcrepo = srcpeer.local()
873 srcrepo = srcpeer.local()
869
874
870 abspath = origsource
875 abspath = origsource
871 if islocal(origsource):
876 if islocal(origsource):
872 abspath = util.abspath(urlutil.urllocalpath(origsource))
877 abspath = util.abspath(urlutil.urllocalpath(origsource))
873
878
874 if islocal(dest):
879 if islocal(dest):
875 if os.path.exists(dest):
880 if os.path.exists(dest):
876 # only clean up directories we create ourselves
881 # only clean up directories we create ourselves
877 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
882 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
878 cleandir = hgdir
883 cleandir = hgdir
879 else:
884 else:
880 cleandir = dest
885 cleandir = dest
881
886
882 copy = False
887 copy = False
883 if (
888 if (
884 srcrepo
889 srcrepo
885 and srcrepo.cancopy()
890 and srcrepo.cancopy()
886 and islocal(dest)
891 and islocal(dest)
887 and not phases.hassecret(srcrepo)
892 and not phases.hassecret(srcrepo)
888 ):
893 ):
889 copy = not pull and not revs
894 copy = not pull and not revs
890
895
891 # TODO this is a somewhat arbitrary restriction.
896 # TODO this is a somewhat arbitrary restriction.
892 if narrow:
897 if narrow:
893 copy = False
898 copy = False
894
899
895 if copy:
900 if copy:
896 try:
901 try:
897 # we use a lock here because if we race with commit, we
902 # we use a lock here because if we race with commit, we
898 # can end up with extra data in the cloned revlogs that's
903 # can end up with extra data in the cloned revlogs that's
899 # not pointed to by changesets, thus causing verify to
904 # not pointed to by changesets, thus causing verify to
900 # fail
905 # fail
901 srclock = srcrepo.lock(wait=False)
906 srclock = srcrepo.lock(wait=False)
902 except error.LockError:
907 except error.LockError:
903 copy = False
908 copy = False
904
909
905 if copy:
910 if copy:
906 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
911 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
907
912
908 destrootpath = urlutil.urllocalpath(dest)
913 destrootpath = urlutil.urllocalpath(dest)
909 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
914 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
910 localrepo.createrepository(
915 localrepo.createrepository(
911 ui,
916 ui,
912 destrootpath,
917 destrootpath,
913 requirements=dest_reqs,
918 requirements=dest_reqs,
914 )
919 )
915 destrepo = localrepo.makelocalrepository(ui, destrootpath)
920 destrepo = localrepo.makelocalrepository(ui, destrootpath)
916
921
917 destwlock = destrepo.wlock()
922 destwlock = destrepo.wlock()
918 destlock = destrepo.lock()
923 destlock = destrepo.lock()
919 from . import streamclone # avoid cycle
924 from . import streamclone # avoid cycle
920
925
921 streamclone.local_copy(srcrepo, destrepo)
926 streamclone.local_copy(srcrepo, destrepo)
922
927
923 # we need to re-init the repo after manually copying the data
928 # we need to re-init the repo after manually copying the data
924 # into it
929 # into it
925 destpeer = peer(srcrepo, peeropts, dest)
930 destpeer = peer(srcrepo, peeropts, dest)
926
931
927 # make the peer aware that is it already locked
932 # make the peer aware that is it already locked
928 #
933 #
929 # important:
934 # important:
930 #
935 #
931 # We still need to release that lock at the end of the function
936 # We still need to release that lock at the end of the function
932 destpeer.local()._lockref = weakref.ref(destlock)
937 destpeer.local()._lockref = weakref.ref(destlock)
933 destpeer.local()._wlockref = weakref.ref(destwlock)
938 destpeer.local()._wlockref = weakref.ref(destwlock)
934 # dirstate also needs to be copied because `_wlockref` has a reference
939 # dirstate also needs to be copied because `_wlockref` has a reference
935 # to it: this dirstate is saved to disk when the wlock is released
940 # to it: this dirstate is saved to disk when the wlock is released
936 destpeer.local().dirstate = destrepo.dirstate
941 destpeer.local().dirstate = destrepo.dirstate
937
942
938 srcrepo.hook(
943 srcrepo.hook(
939 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
944 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
940 )
945 )
941 else:
946 else:
942 try:
947 try:
943 # only pass ui when no srcrepo
948 # only pass ui when no srcrepo
944 destpeer = peer(
949 destpeer = peer(
945 srcrepo or ui,
950 srcrepo or ui,
946 peeropts,
951 peeropts,
947 dest,
952 dest,
948 create=True,
953 create=True,
949 createopts=createopts,
954 createopts=createopts,
950 )
955 )
951 except FileExistsError:
956 except FileExistsError:
952 cleandir = None
957 cleandir = None
953 raise error.Abort(_(b"destination '%s' already exists") % dest)
958 raise error.Abort(_(b"destination '%s' already exists") % dest)
954
959
955 if revs:
960 if revs:
956 if not srcpeer.capable(b'lookup'):
961 if not srcpeer.capable(b'lookup'):
957 raise error.Abort(
962 raise error.Abort(
958 _(
963 _(
959 b"src repository does not support "
964 b"src repository does not support "
960 b"revision lookup and so doesn't "
965 b"revision lookup and so doesn't "
961 b"support clone by revision"
966 b"support clone by revision"
962 )
967 )
963 )
968 )
964
969
965 # TODO this is batchable.
970 # TODO this is batchable.
966 remoterevs = []
971 remoterevs = []
967 for rev in revs:
972 for rev in revs:
968 with srcpeer.commandexecutor() as e:
973 with srcpeer.commandexecutor() as e:
969 remoterevs.append(
974 remoterevs.append(
970 e.callcommand(
975 e.callcommand(
971 b'lookup',
976 b'lookup',
972 {
977 {
973 b'key': rev,
978 b'key': rev,
974 },
979 },
975 ).result()
980 ).result()
976 )
981 )
977 revs = remoterevs
982 revs = remoterevs
978
983
979 checkout = revs[0]
984 checkout = revs[0]
980 else:
985 else:
981 revs = None
986 revs = None
982 local = destpeer.local()
987 local = destpeer.local()
983 if local:
988 if local:
984 if narrow:
989 if narrow:
985 with local.wlock(), local.lock():
990 with local.wlock(), local.lock():
986 local.setnarrowpats(storeincludepats, storeexcludepats)
991 local.setnarrowpats(storeincludepats, storeexcludepats)
987 narrowspec.copytoworkingcopy(local)
992 narrowspec.copytoworkingcopy(local)
988
993
989 u = urlutil.url(abspath)
994 u = urlutil.url(abspath)
990 defaulturl = bytes(u)
995 defaulturl = bytes(u)
991 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
996 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
992 if not stream:
997 if not stream:
993 if pull:
998 if pull:
994 stream = False
999 stream = False
995 else:
1000 else:
996 stream = None
1001 stream = None
997 # internal config: ui.quietbookmarkmove
1002 # internal config: ui.quietbookmarkmove
998 overrides = {(b'ui', b'quietbookmarkmove'): True}
1003 overrides = {(b'ui', b'quietbookmarkmove'): True}
999 with local.ui.configoverride(overrides, b'clone'):
1004 with local.ui.configoverride(overrides, b'clone'):
1000 exchange.pull(
1005 exchange.pull(
1001 local,
1006 local,
1002 srcpeer,
1007 srcpeer,
1003 heads=revs,
1008 heads=revs,
1004 streamclonerequested=stream,
1009 streamclonerequested=stream,
1005 includepats=storeincludepats,
1010 includepats=storeincludepats,
1006 excludepats=storeexcludepats,
1011 excludepats=storeexcludepats,
1007 depth=depth,
1012 depth=depth,
1008 )
1013 )
1009 elif srcrepo:
1014 elif srcrepo:
1010 # TODO lift restriction once exchange.push() accepts narrow
1015 # TODO lift restriction once exchange.push() accepts narrow
1011 # push.
1016 # push.
1012 if narrow:
1017 if narrow:
1013 raise error.Abort(
1018 raise error.Abort(
1014 _(
1019 _(
1015 b'narrow clone not available for '
1020 b'narrow clone not available for '
1016 b'remote destinations'
1021 b'remote destinations'
1017 )
1022 )
1018 )
1023 )
1019
1024
1020 exchange.push(
1025 exchange.push(
1021 srcrepo,
1026 srcrepo,
1022 destpeer,
1027 destpeer,
1023 revs=revs,
1028 revs=revs,
1024 bookmarks=srcrepo._bookmarks.keys(),
1029 bookmarks=srcrepo._bookmarks.keys(),
1025 )
1030 )
1026 else:
1031 else:
1027 raise error.Abort(
1032 raise error.Abort(
1028 _(b"clone from remote to remote not supported")
1033 _(b"clone from remote to remote not supported")
1029 )
1034 )
1030
1035
1031 cleandir = None
1036 cleandir = None
1032
1037
1033 destrepo = destpeer.local()
1038 destrepo = destpeer.local()
1034 if destrepo:
1039 if destrepo:
1035 template = uimod.samplehgrcs[b'cloned']
1040 template = uimod.samplehgrcs[b'cloned']
1036 u = urlutil.url(abspath)
1041 u = urlutil.url(abspath)
1037 u.passwd = None
1042 u.passwd = None
1038 defaulturl = bytes(u)
1043 defaulturl = bytes(u)
1039 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1044 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1040 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1045 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1041
1046
1042 if ui.configbool(b'experimental', b'remotenames'):
1047 if ui.configbool(b'experimental', b'remotenames'):
1043 logexchange.pullremotenames(destrepo, srcpeer)
1048 logexchange.pullremotenames(destrepo, srcpeer)
1044
1049
1045 if update:
1050 if update:
1046 if update is not True:
1051 if update is not True:
1047 with srcpeer.commandexecutor() as e:
1052 with srcpeer.commandexecutor() as e:
1048 checkout = e.callcommand(
1053 checkout = e.callcommand(
1049 b'lookup',
1054 b'lookup',
1050 {
1055 {
1051 b'key': update,
1056 b'key': update,
1052 },
1057 },
1053 ).result()
1058 ).result()
1054
1059
1055 uprev = None
1060 uprev = None
1056 status = None
1061 status = None
1057 if checkout is not None:
1062 if checkout is not None:
1058 # Some extensions (at least hg-git and hg-subversion) have
1063 # Some extensions (at least hg-git and hg-subversion) have
1059 # a peer.lookup() implementation that returns a name instead
1064 # a peer.lookup() implementation that returns a name instead
1060 # of a nodeid. We work around it here until we've figured
1065 # of a nodeid. We work around it here until we've figured
1061 # out a better solution.
1066 # out a better solution.
1062 if len(checkout) == 20 and checkout in destrepo:
1067 if len(checkout) == 20 and checkout in destrepo:
1063 uprev = checkout
1068 uprev = checkout
1064 elif scmutil.isrevsymbol(destrepo, checkout):
1069 elif scmutil.isrevsymbol(destrepo, checkout):
1065 uprev = scmutil.revsymbol(destrepo, checkout).node()
1070 uprev = scmutil.revsymbol(destrepo, checkout).node()
1066 else:
1071 else:
1067 if update is not True:
1072 if update is not True:
1068 try:
1073 try:
1069 uprev = destrepo.lookup(update)
1074 uprev = destrepo.lookup(update)
1070 except error.RepoLookupError:
1075 except error.RepoLookupError:
1071 pass
1076 pass
1072 if uprev is None:
1077 if uprev is None:
1073 try:
1078 try:
1074 if destrepo._activebookmark:
1079 if destrepo._activebookmark:
1075 uprev = destrepo.lookup(destrepo._activebookmark)
1080 uprev = destrepo.lookup(destrepo._activebookmark)
1076 update = destrepo._activebookmark
1081 update = destrepo._activebookmark
1077 else:
1082 else:
1078 uprev = destrepo._bookmarks[b'@']
1083 uprev = destrepo._bookmarks[b'@']
1079 update = b'@'
1084 update = b'@'
1080 bn = destrepo[uprev].branch()
1085 bn = destrepo[uprev].branch()
1081 if bn == b'default':
1086 if bn == b'default':
1082 status = _(b"updating to bookmark %s\n" % update)
1087 status = _(b"updating to bookmark %s\n" % update)
1083 else:
1088 else:
1084 status = (
1089 status = (
1085 _(b"updating to bookmark %s on branch %s\n")
1090 _(b"updating to bookmark %s on branch %s\n")
1086 ) % (update, bn)
1091 ) % (update, bn)
1087 except KeyError:
1092 except KeyError:
1088 try:
1093 try:
1089 uprev = destrepo.branchtip(b'default')
1094 uprev = destrepo.branchtip(b'default')
1090 except error.RepoLookupError:
1095 except error.RepoLookupError:
1091 uprev = destrepo.lookup(b'tip')
1096 uprev = destrepo.lookup(b'tip')
1092 if not status:
1097 if not status:
1093 bn = destrepo[uprev].branch()
1098 bn = destrepo[uprev].branch()
1094 status = _(b"updating to branch %s\n") % bn
1099 status = _(b"updating to branch %s\n") % bn
1095 destrepo.ui.status(status)
1100 destrepo.ui.status(status)
1096 _update(destrepo, uprev)
1101 _update(destrepo, uprev)
1097 if update in destrepo._bookmarks:
1102 if update in destrepo._bookmarks:
1098 bookmarks.activate(destrepo, update)
1103 bookmarks.activate(destrepo, update)
1099 if destlock is not None:
1104 if destlock is not None:
1100 release(destlock)
1105 release(destlock)
1101 if destwlock is not None:
1106 if destwlock is not None:
1102 release(destlock)
1107 release(destlock)
1103 # here is a tiny windows were someone could end up writing the
1108 # here is a tiny windows were someone could end up writing the
1104 # repository before the cache are sure to be warm. This is "fine"
1109 # repository before the cache are sure to be warm. This is "fine"
1105 # as the only "bad" outcome would be some slowness. That potential
1110 # as the only "bad" outcome would be some slowness. That potential
1106 # slowness already affect reader.
1111 # slowness already affect reader.
1107 with destrepo.lock():
1112 with destrepo.lock():
1108 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1113 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1109 finally:
1114 finally:
1110 release(srclock, destlock, destwlock)
1115 release(srclock, destlock, destwlock)
1111 if cleandir is not None:
1116 if cleandir is not None:
1112 shutil.rmtree(cleandir, True)
1117 shutil.rmtree(cleandir, True)
1113 if srcpeer is not None:
1118 if srcpeer is not None:
1114 srcpeer.close()
1119 srcpeer.close()
1115 if destpeer and destpeer.local() is None:
1120 if destpeer and destpeer.local() is None:
1116 destpeer.close()
1121 destpeer.close()
1117 return srcpeer, destpeer
1122 return srcpeer, destpeer
1118
1123
1119
1124
1120 def _showstats(repo, stats, quietempty=False):
1125 def _showstats(repo, stats, quietempty=False):
1121 if quietempty and stats.isempty():
1126 if quietempty and stats.isempty():
1122 return
1127 return
1123 repo.ui.status(
1128 repo.ui.status(
1124 _(
1129 _(
1125 b"%d files updated, %d files merged, "
1130 b"%d files updated, %d files merged, "
1126 b"%d files removed, %d files unresolved\n"
1131 b"%d files removed, %d files unresolved\n"
1127 )
1132 )
1128 % (
1133 % (
1129 stats.updatedcount,
1134 stats.updatedcount,
1130 stats.mergedcount,
1135 stats.mergedcount,
1131 stats.removedcount,
1136 stats.removedcount,
1132 stats.unresolvedcount,
1137 stats.unresolvedcount,
1133 )
1138 )
1134 )
1139 )
1135
1140
1136
1141
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated entry point kept for backward compatibility; warn callers
    # toward the supported merge module APIs.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    update_args = {
        'branchmerge': False,
        'force': overwrite,
        'labels': [b'working copy', b'destination'],
        'updatecheck': updatecheck,
    }
    return mergemod._update(repo, node, **update_args)
1155
1160
1156
1161
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    target_ctx = repo[node]
    stats = mergemod.update(target_ctx, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1164
1169
1165
1170
# naming conflict in clone()
# (keep an alias so code that shadows the name `update` locally can still
# reach the function defined above)
_update = update
1168
1173
1169
1174
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    ctx = repo[node]
    stats = mergemod.clean_update(ctx)
    # a forced (clean) update discards local changes, so it can never leave
    # unresolved conflicts behind
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False
1177
1182
1178
1183
# naming conflict in updatetotally()
# (keep an alias so code that shadows the name `clean` locally can still
# reach the function defined above)
_clean = clean
1181
1186
# The set of legal values for the ``updatecheck`` argument / the
# ``commands.update.check`` config knob (see updatetotally() below).
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1188
1193
1189
1194
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
              changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # an explicitly passed-in value must be valid; reject it loudly
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly a
            # bookmark to move and a name to (re)activate afterwards)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # dirtiness was already rejected here, so no further checking
                # is needed during the actual update
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # the destination name is an existing bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # a non-bookmark name was given: drop the active bookmark, if any
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1268
1273
1269
1274
def merge(ctx, force=False, remind=True, labels=None):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    unresolved = stats.unresolvedcount
    if unresolved:
        hint = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(hint)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved > 0
1291
1296
1292
1297
def abortmerge(ui, repo):
    """Abandon an in-progress merge by cleanly updating back to the
    pre-merge working-copy parent."""
    merge_state = mergestatemod.mergestate.read(repo)
    if merge_state.active():
        # there were conflicts: the recorded local side is where we go back
        node = merge_state.localctx.hex()
    else:
        # there were no conflicts, so no merge state was stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1306
1311
1307
1312
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the return
    value of ``subreporecurse()``.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            # absolute subrepo source: use it verbatim
            peer_path = url = bytes(subpath)
        else:
            # relative subrepo source: resolve it against the pull path,
            # using OS path rules only for local repositories
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    # `cleanupfn` may be replaced below by the bundlerepo cleanup callable,
    # which also takes care of closing the peer
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1374
1379
1375
1380
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``.

    Delegates to _incoming() with callbacks that recurse into subrepos
    (when ``opts['subrepos']`` is set) and display the found changesets.
    """

    def subreporecurse():
        # run `incoming` on every subrepo of the working copy; 0 from any
        # subrepo (changes found) wins over the default 1
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # show the incoming changesets, honoring --limit, --newest-first
        # and --no-merges
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1405
1410
1406
1411
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the changesets missing from every destination in ``dests``.

    Returns a pair ``(outgoing_revs, others)`` where ``outgoing_revs`` is
    the union of missing nodes ordered by local revision number, and
    ``others`` is the list of still-open peers (the caller is responsible
    for closing them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                # absolute subrepo source: use it verbatim
                dest = bytes(subpath)
            else:
                # relative subrepo source: resolve it against the push path,
                # using OS path rules only for local repositories
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # peer stays open; the caller closes the peers in `others`
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1449
1454
1450
1455
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on every subrepo of the working copy and return the
    minimum exit code seen (0 if any subrepo had outgoing changes)."""
    ret = 1
    if not opts.get(b'subrepos'):
        return ret
    wctx = repo[None]
    for sub_path in sorted(wctx.substate):
        subrepo = wctx.sub(sub_path)
        ret = min(ret, subrepo.outgoing(ui, dests, opts))
    return ret
1459
1464
1460
1465
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: emit everything (in the order chosen above)
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        if no_merges:
            # a merge has two non-null parents; skip those when requested
            parents = [p for p in cl.parents(node) if p != repo.nullid]
            if len(parents) == 2:
                continue
        emitted += 1
        yield node
1482
1487
1483
1488
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the destination(s).

    Returns 0 when outgoing changes exist (here or in a subrepo when
    ``opts['subrepos']`` is set), 1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # run the outgoing hooks for every destination peer even when there
        # is nothing to display
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # _outgoing() left the peers open for the hooks above
        for oth in others:
            oth.close()
1515
1520
1516
1521
def verify(repo, level=None):
    """verify the consistency of a repository

    After the main verification, also verifies any subrepository referenced
    from a visible changeset.  Returns the (possibly non-zero) status from
    verifymod.verify() combined with the subrepo verification results.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        # a missing/broken subrepo is reported but does not
                        # abort the remaining checks
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading .hgsubstate itself failed for this revision
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1549
1554
1550
1555
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # looks like a repository: start from its base ui (dropping the
        # repo-specific configuration) but keep reading options from the
        # repo's own ui below
        dst = src.baseui.copy()
        src = src.ui
    else:
        # assume it's a global ui object: keep all global options
        dst = src.copy()

    # copy ssh-specific options (command-line options win over config)
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # copy bundle-specific options
    mainreporoot = src.config(b'bundle', b'mainreporoot')
    if mainreporoot:
        dst.setconfig(b'bundle', b'mainreporoot', mainreporoot, b'copied')

    # copy selected local settings to the remote ui
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1579
1584
1580
1585
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.  Each entry is (repo attribute holding the base directory,
# relative file name); see cachedlocalrepo._repostate() below.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1590
1595
1591
1596
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) pairs for the files of interest, and the
        # newest mtime among them (see _repostate())
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        # re-apply the filter level of the previous instance
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ((mtime, size) per file of interest, newest mtime seen).

        Falls back to stat()ing the containing directory when a file of
        interest does not exist.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the freshness snapshot so the copy does not spuriously
        # consider itself out of date
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now