##// END OF EJS Templates
path: use `get_clone_path_obj` in share...
marmoute -
r50639:5f36784c default
parent child Browse files
Show More
@@ -1,1647 +1,1648 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
62 release = lock.release
62 release = lock.release
63
63
64 # shared features
64 # shared features
65 sharedbookmarks = b'bookmarks'
65 sharedbookmarks = b'bookmarks'
66
66
67
67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names (from a ``#branch`` URL fragment) to revisions.

    ``branches`` is a ``(hashbranch, branches)`` pair, presumably as produced
    by ``urlutil.parseurl`` — TODO confirm against callers.  Returns a
    ``(revs, checkout)`` pair: the possibly-extended revision list and the
    revision to check out (or ``None``).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch information requested: pass ``revs`` through untouched
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        # no way to resolve the fragment remotely: treat it as a revision
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # Expand ``branch`` to its heads (appended to ``revs``); returns
        # False when the branch is unknown to the remote.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a branch name: assume it names a revision directly
            revs.append(hashbranch)
    return revs, revs[0]
110
110
111
111
112 def _isfile(path):
112 def _isfile(path):
113 try:
113 try:
114 # we use os.stat() directly here instead of os.path.isfile()
114 # we use os.stat() directly here instead of os.path.isfile()
115 # because the latter started returning `False` on invalid path
115 # because the latter started returning `False` on invalid path
116 # exceptions starting in 3.8 and we care about handling
116 # exceptions starting in 3.8 and we care about handling
117 # invalid paths specially here.
117 # invalid paths specially here.
118 st = os.stat(path)
118 st = os.stat(path)
119 except ValueError as e:
119 except ValueError as e:
120 msg = stringutil.forcebytestr(e)
120 msg = stringutil.forcebytestr(e)
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
122 except OSError:
122 except OSError:
123 return False
123 return False
124 else:
124 else:
125 return stat.S_ISREG(st.st_mode)
125 return stat.S_ISREG(st.st_mode)
126
126
127
127
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # A path pointing at a regular file is a bundle, not a local repo.
        path = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(path)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        # Regular files are opened as bundle repos, anything else as a
        # local repository; extra arguments are forwarded unchanged.
        path = util.expandpath(urlutil.urllocalpath(path))
        if _isfile(path):
            cls = bundlerepo
        else:
            cls = localrepo
        return cls.instance(ui, path, *args, **kwargs)
144
144
145
145
# URL schemes that can only be opened as a repository object (no peer).
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL schemes that are opened through a peer (remote/wire access).
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
158
158
159
159
def _peerlookup(path):
    """Return the handler module/class for ``path``'s URL scheme.

    Peer schemes win over repo schemes; anything unrecognized falls back
    to ``LocalFactory`` (schemeless paths are treated as ``file``).
    """
    parsed = urlutil.url(path)
    scheme = parsed.scheme or b'file'
    handler = peer_schemes.get(scheme)
    if handler is None:
        handler = repo_schemes.get(scheme, LocalFactory)
    return handler
168
168
169
169
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        cls = _peerlookup(repo)
        cls.instance  # make sure we load the module
        if util.safehasattr(cls, 'islocal'):
            return cls.islocal(repo)  # pytype: disable=module-attr
        # handler has no islocal(): peer schemes, hence not local
        return False
    # repo objects know this themselves; this spelling is deprecated for them
    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
    return repo.local()
180
180
181
181
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    # query/fragment parsing is disabled so '#'/'?' in filenames survive
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
189
189
190
190
# Callables invoked as f(ui, peer) when a wire (non-local) peer is set up;
# extensions register additional entries here.
wirepeersetupfuncs = []
193
193
194
194
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run extension setup hooks on a freshly created repo or peer.

    ``presetupfuncs`` run first, then every loaded extension's ``reposetup``
    hook (timed and logged); wire peers additionally go through
    ``wirepeersetupfuncs``.
    """
    ui = getattr(obj, "ui", ui)  # prefer the object's own ui when present
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b'  > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get the extra peer-specific setup hooks
        for f in wirepeersetupfuncs:
            f(ui, obj)
213
213
214
214
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    cls = repo_schemes.get(scheme)
    if cls is None:
        if scheme in peer_schemes:
            # remote-only scheme: a repository object cannot be built for it
            raise error.Abort(_(b"repository '%s' is not local") % path)
        cls = LocalFactory
    repo = cls.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    # hand back the "visible" filtered view so hidden changesets stay hidden
    return repo.filtered(b'visible')
241
241
242
242
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    if util.safehasattr(path, 'url'):
        # this is a urlutil.path object
        scheme = path.url.scheme  # pytype: disable=attribute-error
        # XXX for now we don't do anything more than that
        path = path.loc  # pytype: disable=attribute-error
    else:
        scheme = urlutil.url(path).scheme
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
        peer = cls.instance(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo = repository(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        # local repos expose a peer-shaped wrapper over themselves
        peer = repo.peer()
    return peer
274
274
275
275
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if not path:
        return b''
    # normpath drops any trailing slash so basename yields the last component
    return os.path.basename(os.path.normpath(path))
296
296
297
297
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share: the store lives inside the repo itself
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        # resolved and cached by a previous call
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo  # cache on the instance for subsequent calls
    return srcrepo
315
315
316
316
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        # repo/peer object: local() returns None for remote peers
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        # resolve [paths] aliases/schemes in the destination specification
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        # only the #branch fragment applies here; no explicit branch list
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open the destination after postshare() wrote its configuration —
    # presumably so the fresh hgrc is honored; TODO confirm
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
368
369
369
370
def _prependsourcehgrc(repo):
    """On unshare, copy the share source's hgrc in front of the local one.

    Only meaningful when the share was performed with the share-safe
    method, where the source repository's config was shared; prepending it
    keeps hooks and other checks configured there from silently vanishing.
    """
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    # nothing to carry over if the source never had an hgrc
    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
390
391
391
392
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old share pointer around as a backup
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
438
439
439
440
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # record where the share came from as the default path
        hgrc = b'[paths]\ndefault = %s\n' % default
        destrepo.vfs.write(b'hgrc', util.tonativeeol(hgrc))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow clones also need the narrowspec copied into the working copy
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
456
457
457
458
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # a concrete revision was requested; it overrides ``checkout``
        checkout = update
    # try the explicit checkout first, then the usual fallbacks
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
478
479
479
480
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # ``hardlink`` starts as None; util.copyfiles decides whether
        # hardlinking works and reports the mode back on each call
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # phase data is not copied from publishing repositories
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # never leak the destination lock on failure
        release(destlock)
        raise
518
519
519
520
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns a ``(srcpeer, destpeer)`` pair.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
625
626
626
627
627 # Recomputing caches is often slow on big repos, so copy them.
628 # Recomputing caches is often slow on big repos, so copy them.
628 def _copycache(srcrepo, dstcachedir, fname):
629 def _copycache(srcrepo, dstcachedir, fname):
629 """copy a cache from srcrepo to destcachedir (if it exists)"""
630 """copy a cache from srcrepo to destcachedir (if it exists)"""
630 srcfname = srcrepo.cachevfs.join(fname)
631 srcfname = srcrepo.cachevfs.join(fname)
631 dstfname = os.path.join(dstcachedir, fname)
632 dstfname = os.path.join(dstcachedir, fname)
632 if os.path.exists(srcfname):
633 if os.path.exists(srcfname):
633 if not os.path.exists(dstcachedir):
634 if not os.path.exists(dstcachedir):
634 os.mkdir(dstcachedir)
635 os.mkdir(dstcachedir)
635 util.copyfile(srcfname, dstfname)
636 util.copyfile(srcfname, dstfname)
636
637
637
638
638 def clone(
639 def clone(
639 ui,
640 ui,
640 peeropts,
641 peeropts,
641 source,
642 source,
642 dest=None,
643 dest=None,
643 pull=False,
644 pull=False,
644 revs=None,
645 revs=None,
645 update=True,
646 update=True,
646 stream=False,
647 stream=False,
647 branch=None,
648 branch=None,
648 shareopts=None,
649 shareopts=None,
649 storeincludepats=None,
650 storeincludepats=None,
650 storeexcludepats=None,
651 storeexcludepats=None,
651 depth=None,
652 depth=None,
652 ):
653 ):
653 """Make a copy of an existing repository.
654 """Make a copy of an existing repository.
654
655
655 Create a copy of an existing repository in a new directory. The
656 Create a copy of an existing repository in a new directory. The
656 source and destination are URLs, as passed to the repository
657 source and destination are URLs, as passed to the repository
657 function. Returns a pair of repository peers, the source and
658 function. Returns a pair of repository peers, the source and
658 newly created destination.
659 newly created destination.
659
660
660 The location of the source is added to the new repository's
661 The location of the source is added to the new repository's
661 .hg/hgrc file, as the default to be used for future pulls and
662 .hg/hgrc file, as the default to be used for future pulls and
662 pushes.
663 pushes.
663
664
664 If an exception is raised, the partly cloned/updated destination
665 If an exception is raised, the partly cloned/updated destination
665 repository will be deleted.
666 repository will be deleted.
666
667
667 Arguments:
668 Arguments:
668
669
669 source: repository object or URL
670 source: repository object or URL
670
671
671 dest: URL of destination repository to create (defaults to base
672 dest: URL of destination repository to create (defaults to base
672 name of source repository)
673 name of source repository)
673
674
674 pull: always pull from source repository, even in local case or if the
675 pull: always pull from source repository, even in local case or if the
675 server prefers streaming
676 server prefers streaming
676
677
677 stream: stream raw data uncompressed from repository (fast over
678 stream: stream raw data uncompressed from repository (fast over
678 LAN, slow over WAN)
679 LAN, slow over WAN)
679
680
680 revs: revision to clone up to (implies pull=True)
681 revs: revision to clone up to (implies pull=True)
681
682
682 update: update working directory after clone completes, if
683 update: update working directory after clone completes, if
683 destination is local repository (True means update to default rev,
684 destination is local repository (True means update to default rev,
684 anything else is treated as a revision)
685 anything else is treated as a revision)
685
686
686 branch: branches to clone
687 branch: branches to clone
687
688
688 shareopts: dict of options to control auto sharing behavior. The "pool" key
689 shareopts: dict of options to control auto sharing behavior. The "pool" key
689 activates auto sharing mode and defines the directory for stores. The
690 activates auto sharing mode and defines the directory for stores. The
690 "mode" key determines how to construct the directory name of the shared
691 "mode" key determines how to construct the directory name of the shared
691 repository. "identity" means the name is derived from the node of the first
692 repository. "identity" means the name is derived from the node of the first
692 changeset in the repository. "remote" means the name is derived from the
693 changeset in the repository. "remote" means the name is derived from the
693 remote's path/URL. Defaults to "identity."
694 remote's path/URL. Defaults to "identity."
694
695
695 storeincludepats and storeexcludepats: sets of file patterns to include and
696 storeincludepats and storeexcludepats: sets of file patterns to include and
696 exclude in the repository copy, respectively. If not defined, all files
697 exclude in the repository copy, respectively. If not defined, all files
697 will be included (a "full" clone). Otherwise a "narrow" clone containing
698 will be included (a "full" clone). Otherwise a "narrow" clone containing
698 only the requested files will be performed. If ``storeincludepats`` is not
699 only the requested files will be performed. If ``storeincludepats`` is not
699 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
700 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
700 ``path:.``. If both are empty sets, no files will be cloned.
701 ``path:.``. If both are empty sets, no files will be cloned.
701 """
702 """
702
703
703 if isinstance(source, bytes):
704 if isinstance(source, bytes):
704 src = urlutil.get_clone_path(ui, source, branch)
705 src = urlutil.get_clone_path(ui, source, branch)
705 origsource, source, branches = src
706 origsource, source, branches = src
706 srcpeer = peer(ui, peeropts, source)
707 srcpeer = peer(ui, peeropts, source)
707 else:
708 else:
708 srcpeer = source.peer() # in case we were called with a localrepo
709 srcpeer = source.peer() # in case we were called with a localrepo
709 branches = (None, branch or [])
710 branches = (None, branch or [])
710 origsource = source = srcpeer.url()
711 origsource = source = srcpeer.url()
711 srclock = destlock = destwlock = cleandir = None
712 srclock = destlock = destwlock = cleandir = None
712 destpeer = None
713 destpeer = None
713 try:
714 try:
714 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
715 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
715
716
716 if dest is None:
717 if dest is None:
717 dest = defaultdest(source)
718 dest = defaultdest(source)
718 if dest:
719 if dest:
719 ui.status(_(b"destination directory: %s\n") % dest)
720 ui.status(_(b"destination directory: %s\n") % dest)
720 else:
721 else:
721 dest = urlutil.get_clone_path(ui, dest)[0]
722 dest = urlutil.get_clone_path(ui, dest)[0]
722
723
723 dest = urlutil.urllocalpath(dest)
724 dest = urlutil.urllocalpath(dest)
724 source = urlutil.urllocalpath(source)
725 source = urlutil.urllocalpath(source)
725
726
726 if not dest:
727 if not dest:
727 raise error.InputError(_(b"empty destination path is not valid"))
728 raise error.InputError(_(b"empty destination path is not valid"))
728
729
729 destvfs = vfsmod.vfs(dest, expandpath=True)
730 destvfs = vfsmod.vfs(dest, expandpath=True)
730 if destvfs.lexists():
731 if destvfs.lexists():
731 if not destvfs.isdir():
732 if not destvfs.isdir():
732 raise error.InputError(
733 raise error.InputError(
733 _(b"destination '%s' already exists") % dest
734 _(b"destination '%s' already exists") % dest
734 )
735 )
735 elif destvfs.listdir():
736 elif destvfs.listdir():
736 raise error.InputError(
737 raise error.InputError(
737 _(b"destination '%s' is not empty") % dest
738 _(b"destination '%s' is not empty") % dest
738 )
739 )
739
740
740 createopts = {}
741 createopts = {}
741 narrow = False
742 narrow = False
742
743
743 if storeincludepats is not None:
744 if storeincludepats is not None:
744 narrowspec.validatepatterns(storeincludepats)
745 narrowspec.validatepatterns(storeincludepats)
745 narrow = True
746 narrow = True
746
747
747 if storeexcludepats is not None:
748 if storeexcludepats is not None:
748 narrowspec.validatepatterns(storeexcludepats)
749 narrowspec.validatepatterns(storeexcludepats)
749 narrow = True
750 narrow = True
750
751
751 if narrow:
752 if narrow:
752 # Include everything by default if only exclusion patterns defined.
753 # Include everything by default if only exclusion patterns defined.
753 if storeexcludepats and not storeincludepats:
754 if storeexcludepats and not storeincludepats:
754 storeincludepats = {b'path:.'}
755 storeincludepats = {b'path:.'}
755
756
756 createopts[b'narrowfiles'] = True
757 createopts[b'narrowfiles'] = True
757
758
758 if depth:
759 if depth:
759 createopts[b'shallowfilestore'] = True
760 createopts[b'shallowfilestore'] = True
760
761
761 if srcpeer.capable(b'lfs-serve'):
762 if srcpeer.capable(b'lfs-serve'):
762 # Repository creation honors the config if it disabled the extension, so
763 # Repository creation honors the config if it disabled the extension, so
763 # we can't just announce that lfs will be enabled. This check avoids
764 # we can't just announce that lfs will be enabled. This check avoids
764 # saying that lfs will be enabled, and then saying it's an unknown
765 # saying that lfs will be enabled, and then saying it's an unknown
765 # feature. The lfs creation option is set in either case so that a
766 # feature. The lfs creation option is set in either case so that a
766 # requirement is added. If the extension is explicitly disabled but the
767 # requirement is added. If the extension is explicitly disabled but the
767 # requirement is set, the clone aborts early, before transferring any
768 # requirement is set, the clone aborts early, before transferring any
768 # data.
769 # data.
769 createopts[b'lfs'] = True
770 createopts[b'lfs'] = True
770
771
771 if extensions.disabled_help(b'lfs'):
772 if extensions.disabled_help(b'lfs'):
772 ui.status(
773 ui.status(
773 _(
774 _(
774 b'(remote is using large file support (lfs), but it is '
775 b'(remote is using large file support (lfs), but it is '
775 b'explicitly disabled in the local configuration)\n'
776 b'explicitly disabled in the local configuration)\n'
776 )
777 )
777 )
778 )
778 else:
779 else:
779 ui.status(
780 ui.status(
780 _(
781 _(
781 b'(remote is using large file support (lfs); lfs will '
782 b'(remote is using large file support (lfs); lfs will '
782 b'be enabled for this repository)\n'
783 b'be enabled for this repository)\n'
783 )
784 )
784 )
785 )
785
786
786 shareopts = shareopts or {}
787 shareopts = shareopts or {}
787 sharepool = shareopts.get(b'pool')
788 sharepool = shareopts.get(b'pool')
788 sharenamemode = shareopts.get(b'mode')
789 sharenamemode = shareopts.get(b'mode')
789 if sharepool and islocal(dest):
790 if sharepool and islocal(dest):
790 sharepath = None
791 sharepath = None
791 if sharenamemode == b'identity':
792 if sharenamemode == b'identity':
792 # Resolve the name from the initial changeset in the remote
793 # Resolve the name from the initial changeset in the remote
793 # repository. This returns nullid when the remote is empty. It
794 # repository. This returns nullid when the remote is empty. It
794 # raises RepoLookupError if revision 0 is filtered or otherwise
795 # raises RepoLookupError if revision 0 is filtered or otherwise
795 # not available. If we fail to resolve, sharing is not enabled.
796 # not available. If we fail to resolve, sharing is not enabled.
796 try:
797 try:
797 with srcpeer.commandexecutor() as e:
798 with srcpeer.commandexecutor() as e:
798 rootnode = e.callcommand(
799 rootnode = e.callcommand(
799 b'lookup',
800 b'lookup',
800 {
801 {
801 b'key': b'0',
802 b'key': b'0',
802 },
803 },
803 ).result()
804 ).result()
804
805
805 if rootnode != sha1nodeconstants.nullid:
806 if rootnode != sha1nodeconstants.nullid:
806 sharepath = os.path.join(sharepool, hex(rootnode))
807 sharepath = os.path.join(sharepool, hex(rootnode))
807 else:
808 else:
808 ui.status(
809 ui.status(
809 _(
810 _(
810 b'(not using pooled storage: '
811 b'(not using pooled storage: '
811 b'remote appears to be empty)\n'
812 b'remote appears to be empty)\n'
812 )
813 )
813 )
814 )
814 except error.RepoLookupError:
815 except error.RepoLookupError:
815 ui.status(
816 ui.status(
816 _(
817 _(
817 b'(not using pooled storage: '
818 b'(not using pooled storage: '
818 b'unable to resolve identity of remote)\n'
819 b'unable to resolve identity of remote)\n'
819 )
820 )
820 )
821 )
821 elif sharenamemode == b'remote':
822 elif sharenamemode == b'remote':
822 sharepath = os.path.join(
823 sharepath = os.path.join(
823 sharepool, hex(hashutil.sha1(source).digest())
824 sharepool, hex(hashutil.sha1(source).digest())
824 )
825 )
825 else:
826 else:
826 raise error.Abort(
827 raise error.Abort(
827 _(b'unknown share naming mode: %s') % sharenamemode
828 _(b'unknown share naming mode: %s') % sharenamemode
828 )
829 )
829
830
830 # TODO this is a somewhat arbitrary restriction.
831 # TODO this is a somewhat arbitrary restriction.
831 if narrow:
832 if narrow:
832 ui.status(
833 ui.status(
833 _(b'(pooled storage not supported for narrow clones)\n')
834 _(b'(pooled storage not supported for narrow clones)\n')
834 )
835 )
835 sharepath = None
836 sharepath = None
836
837
837 if sharepath:
838 if sharepath:
838 return clonewithshare(
839 return clonewithshare(
839 ui,
840 ui,
840 peeropts,
841 peeropts,
841 sharepath,
842 sharepath,
842 source,
843 source,
843 srcpeer,
844 srcpeer,
844 dest,
845 dest,
845 pull=pull,
846 pull=pull,
846 rev=revs,
847 rev=revs,
847 update=update,
848 update=update,
848 stream=stream,
849 stream=stream,
849 )
850 )
850
851
851 srcrepo = srcpeer.local()
852 srcrepo = srcpeer.local()
852
853
853 abspath = origsource
854 abspath = origsource
854 if islocal(origsource):
855 if islocal(origsource):
855 abspath = util.abspath(urlutil.urllocalpath(origsource))
856 abspath = util.abspath(urlutil.urllocalpath(origsource))
856
857
857 if islocal(dest):
858 if islocal(dest):
858 if os.path.exists(dest):
859 if os.path.exists(dest):
859 # only clean up directories we create ourselves
860 # only clean up directories we create ourselves
860 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
861 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
861 cleandir = hgdir
862 cleandir = hgdir
862 else:
863 else:
863 cleandir = dest
864 cleandir = dest
864
865
865 copy = False
866 copy = False
866 if (
867 if (
867 srcrepo
868 srcrepo
868 and srcrepo.cancopy()
869 and srcrepo.cancopy()
869 and islocal(dest)
870 and islocal(dest)
870 and not phases.hassecret(srcrepo)
871 and not phases.hassecret(srcrepo)
871 ):
872 ):
872 copy = not pull and not revs
873 copy = not pull and not revs
873
874
874 # TODO this is a somewhat arbitrary restriction.
875 # TODO this is a somewhat arbitrary restriction.
875 if narrow:
876 if narrow:
876 copy = False
877 copy = False
877
878
878 if copy:
879 if copy:
879 try:
880 try:
880 # we use a lock here because if we race with commit, we
881 # we use a lock here because if we race with commit, we
881 # can end up with extra data in the cloned revlogs that's
882 # can end up with extra data in the cloned revlogs that's
882 # not pointed to by changesets, thus causing verify to
883 # not pointed to by changesets, thus causing verify to
883 # fail
884 # fail
884 srclock = srcrepo.lock(wait=False)
885 srclock = srcrepo.lock(wait=False)
885 except error.LockError:
886 except error.LockError:
886 copy = False
887 copy = False
887
888
888 if copy:
889 if copy:
889 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
890 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
890
891
891 destrootpath = urlutil.urllocalpath(dest)
892 destrootpath = urlutil.urllocalpath(dest)
892 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
893 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
893 localrepo.createrepository(
894 localrepo.createrepository(
894 ui,
895 ui,
895 destrootpath,
896 destrootpath,
896 requirements=dest_reqs,
897 requirements=dest_reqs,
897 )
898 )
898 destrepo = localrepo.makelocalrepository(ui, destrootpath)
899 destrepo = localrepo.makelocalrepository(ui, destrootpath)
899
900
900 destwlock = destrepo.wlock()
901 destwlock = destrepo.wlock()
901 destlock = destrepo.lock()
902 destlock = destrepo.lock()
902 from . import streamclone # avoid cycle
903 from . import streamclone # avoid cycle
903
904
904 streamclone.local_copy(srcrepo, destrepo)
905 streamclone.local_copy(srcrepo, destrepo)
905
906
906 # we need to re-init the repo after manually copying the data
907 # we need to re-init the repo after manually copying the data
907 # into it
908 # into it
908 destpeer = peer(srcrepo, peeropts, dest)
909 destpeer = peer(srcrepo, peeropts, dest)
909
910
910 # make the peer aware that is it already locked
911 # make the peer aware that is it already locked
911 #
912 #
912 # important:
913 # important:
913 #
914 #
914 # We still need to release that lock at the end of the function
915 # We still need to release that lock at the end of the function
915 destpeer.local()._lockref = weakref.ref(destlock)
916 destpeer.local()._lockref = weakref.ref(destlock)
916 destpeer.local()._wlockref = weakref.ref(destwlock)
917 destpeer.local()._wlockref = weakref.ref(destwlock)
917 # dirstate also needs to be copied because `_wlockref` has a reference
918 # dirstate also needs to be copied because `_wlockref` has a reference
918 # to it: this dirstate is saved to disk when the wlock is released
919 # to it: this dirstate is saved to disk when the wlock is released
919 destpeer.local().dirstate = destrepo.dirstate
920 destpeer.local().dirstate = destrepo.dirstate
920
921
921 srcrepo.hook(
922 srcrepo.hook(
922 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
923 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
923 )
924 )
924 else:
925 else:
925 try:
926 try:
926 # only pass ui when no srcrepo
927 # only pass ui when no srcrepo
927 destpeer = peer(
928 destpeer = peer(
928 srcrepo or ui,
929 srcrepo or ui,
929 peeropts,
930 peeropts,
930 dest,
931 dest,
931 create=True,
932 create=True,
932 createopts=createopts,
933 createopts=createopts,
933 )
934 )
934 except FileExistsError:
935 except FileExistsError:
935 cleandir = None
936 cleandir = None
936 raise error.Abort(_(b"destination '%s' already exists") % dest)
937 raise error.Abort(_(b"destination '%s' already exists") % dest)
937
938
938 if revs:
939 if revs:
939 if not srcpeer.capable(b'lookup'):
940 if not srcpeer.capable(b'lookup'):
940 raise error.Abort(
941 raise error.Abort(
941 _(
942 _(
942 b"src repository does not support "
943 b"src repository does not support "
943 b"revision lookup and so doesn't "
944 b"revision lookup and so doesn't "
944 b"support clone by revision"
945 b"support clone by revision"
945 )
946 )
946 )
947 )
947
948
948 # TODO this is batchable.
949 # TODO this is batchable.
949 remoterevs = []
950 remoterevs = []
950 for rev in revs:
951 for rev in revs:
951 with srcpeer.commandexecutor() as e:
952 with srcpeer.commandexecutor() as e:
952 remoterevs.append(
953 remoterevs.append(
953 e.callcommand(
954 e.callcommand(
954 b'lookup',
955 b'lookup',
955 {
956 {
956 b'key': rev,
957 b'key': rev,
957 },
958 },
958 ).result()
959 ).result()
959 )
960 )
960 revs = remoterevs
961 revs = remoterevs
961
962
962 checkout = revs[0]
963 checkout = revs[0]
963 else:
964 else:
964 revs = None
965 revs = None
965 local = destpeer.local()
966 local = destpeer.local()
966 if local:
967 if local:
967 if narrow:
968 if narrow:
968 with local.wlock(), local.lock():
969 with local.wlock(), local.lock():
969 local.setnarrowpats(storeincludepats, storeexcludepats)
970 local.setnarrowpats(storeincludepats, storeexcludepats)
970 narrowspec.copytoworkingcopy(local)
971 narrowspec.copytoworkingcopy(local)
971
972
972 u = urlutil.url(abspath)
973 u = urlutil.url(abspath)
973 defaulturl = bytes(u)
974 defaulturl = bytes(u)
974 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
975 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
975 if not stream:
976 if not stream:
976 if pull:
977 if pull:
977 stream = False
978 stream = False
978 else:
979 else:
979 stream = None
980 stream = None
980 # internal config: ui.quietbookmarkmove
981 # internal config: ui.quietbookmarkmove
981 overrides = {(b'ui', b'quietbookmarkmove'): True}
982 overrides = {(b'ui', b'quietbookmarkmove'): True}
982 with local.ui.configoverride(overrides, b'clone'):
983 with local.ui.configoverride(overrides, b'clone'):
983 exchange.pull(
984 exchange.pull(
984 local,
985 local,
985 srcpeer,
986 srcpeer,
986 heads=revs,
987 heads=revs,
987 streamclonerequested=stream,
988 streamclonerequested=stream,
988 includepats=storeincludepats,
989 includepats=storeincludepats,
989 excludepats=storeexcludepats,
990 excludepats=storeexcludepats,
990 depth=depth,
991 depth=depth,
991 )
992 )
992 elif srcrepo:
993 elif srcrepo:
993 # TODO lift restriction once exchange.push() accepts narrow
994 # TODO lift restriction once exchange.push() accepts narrow
994 # push.
995 # push.
995 if narrow:
996 if narrow:
996 raise error.Abort(
997 raise error.Abort(
997 _(
998 _(
998 b'narrow clone not available for '
999 b'narrow clone not available for '
999 b'remote destinations'
1000 b'remote destinations'
1000 )
1001 )
1001 )
1002 )
1002
1003
1003 exchange.push(
1004 exchange.push(
1004 srcrepo,
1005 srcrepo,
1005 destpeer,
1006 destpeer,
1006 revs=revs,
1007 revs=revs,
1007 bookmarks=srcrepo._bookmarks.keys(),
1008 bookmarks=srcrepo._bookmarks.keys(),
1008 )
1009 )
1009 else:
1010 else:
1010 raise error.Abort(
1011 raise error.Abort(
1011 _(b"clone from remote to remote not supported")
1012 _(b"clone from remote to remote not supported")
1012 )
1013 )
1013
1014
1014 cleandir = None
1015 cleandir = None
1015
1016
1016 destrepo = destpeer.local()
1017 destrepo = destpeer.local()
1017 if destrepo:
1018 if destrepo:
1018 template = uimod.samplehgrcs[b'cloned']
1019 template = uimod.samplehgrcs[b'cloned']
1019 u = urlutil.url(abspath)
1020 u = urlutil.url(abspath)
1020 u.passwd = None
1021 u.passwd = None
1021 defaulturl = bytes(u)
1022 defaulturl = bytes(u)
1022 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1023 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1023 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1024 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1024
1025
1025 if ui.configbool(b'experimental', b'remotenames'):
1026 if ui.configbool(b'experimental', b'remotenames'):
1026 logexchange.pullremotenames(destrepo, srcpeer)
1027 logexchange.pullremotenames(destrepo, srcpeer)
1027
1028
1028 if update:
1029 if update:
1029 if update is not True:
1030 if update is not True:
1030 with srcpeer.commandexecutor() as e:
1031 with srcpeer.commandexecutor() as e:
1031 checkout = e.callcommand(
1032 checkout = e.callcommand(
1032 b'lookup',
1033 b'lookup',
1033 {
1034 {
1034 b'key': update,
1035 b'key': update,
1035 },
1036 },
1036 ).result()
1037 ).result()
1037
1038
1038 uprev = None
1039 uprev = None
1039 status = None
1040 status = None
1040 if checkout is not None:
1041 if checkout is not None:
1041 # Some extensions (at least hg-git and hg-subversion) have
1042 # Some extensions (at least hg-git and hg-subversion) have
1042 # a peer.lookup() implementation that returns a name instead
1043 # a peer.lookup() implementation that returns a name instead
1043 # of a nodeid. We work around it here until we've figured
1044 # of a nodeid. We work around it here until we've figured
1044 # out a better solution.
1045 # out a better solution.
1045 if len(checkout) == 20 and checkout in destrepo:
1046 if len(checkout) == 20 and checkout in destrepo:
1046 uprev = checkout
1047 uprev = checkout
1047 elif scmutil.isrevsymbol(destrepo, checkout):
1048 elif scmutil.isrevsymbol(destrepo, checkout):
1048 uprev = scmutil.revsymbol(destrepo, checkout).node()
1049 uprev = scmutil.revsymbol(destrepo, checkout).node()
1049 else:
1050 else:
1050 if update is not True:
1051 if update is not True:
1051 try:
1052 try:
1052 uprev = destrepo.lookup(update)
1053 uprev = destrepo.lookup(update)
1053 except error.RepoLookupError:
1054 except error.RepoLookupError:
1054 pass
1055 pass
1055 if uprev is None:
1056 if uprev is None:
1056 try:
1057 try:
1057 if destrepo._activebookmark:
1058 if destrepo._activebookmark:
1058 uprev = destrepo.lookup(destrepo._activebookmark)
1059 uprev = destrepo.lookup(destrepo._activebookmark)
1059 update = destrepo._activebookmark
1060 update = destrepo._activebookmark
1060 else:
1061 else:
1061 uprev = destrepo._bookmarks[b'@']
1062 uprev = destrepo._bookmarks[b'@']
1062 update = b'@'
1063 update = b'@'
1063 bn = destrepo[uprev].branch()
1064 bn = destrepo[uprev].branch()
1064 if bn == b'default':
1065 if bn == b'default':
1065 status = _(b"updating to bookmark %s\n" % update)
1066 status = _(b"updating to bookmark %s\n" % update)
1066 else:
1067 else:
1067 status = (
1068 status = (
1068 _(b"updating to bookmark %s on branch %s\n")
1069 _(b"updating to bookmark %s on branch %s\n")
1069 ) % (update, bn)
1070 ) % (update, bn)
1070 except KeyError:
1071 except KeyError:
1071 try:
1072 try:
1072 uprev = destrepo.branchtip(b'default')
1073 uprev = destrepo.branchtip(b'default')
1073 except error.RepoLookupError:
1074 except error.RepoLookupError:
1074 uprev = destrepo.lookup(b'tip')
1075 uprev = destrepo.lookup(b'tip')
1075 if not status:
1076 if not status:
1076 bn = destrepo[uprev].branch()
1077 bn = destrepo[uprev].branch()
1077 status = _(b"updating to branch %s\n") % bn
1078 status = _(b"updating to branch %s\n") % bn
1078 destrepo.ui.status(status)
1079 destrepo.ui.status(status)
1079 _update(destrepo, uprev)
1080 _update(destrepo, uprev)
1080 if update in destrepo._bookmarks:
1081 if update in destrepo._bookmarks:
1081 bookmarks.activate(destrepo, update)
1082 bookmarks.activate(destrepo, update)
1082 if destlock is not None:
1083 if destlock is not None:
1083 release(destlock)
1084 release(destlock)
1084 if destwlock is not None:
1085 if destwlock is not None:
1085 release(destlock)
1086 release(destlock)
1086 # here is a tiny windows were someone could end up writing the
1087 # here is a tiny windows were someone could end up writing the
1087 # repository before the cache are sure to be warm. This is "fine"
1088 # repository before the cache are sure to be warm. This is "fine"
1088 # as the only "bad" outcome would be some slowness. That potential
1089 # as the only "bad" outcome would be some slowness. That potential
1089 # slowness already affect reader.
1090 # slowness already affect reader.
1090 with destrepo.lock():
1091 with destrepo.lock():
1091 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1092 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1092 finally:
1093 finally:
1093 release(srclock, destlock, destwlock)
1094 release(srclock, destlock, destwlock)
1094 if cleandir is not None:
1095 if cleandir is not None:
1095 shutil.rmtree(cleandir, True)
1096 shutil.rmtree(cleandir, True)
1096 if srcpeer is not None:
1097 if srcpeer is not None:
1097 srcpeer.close()
1098 srcpeer.close()
1098 if destpeer and destpeer.local() is None:
1099 if destpeer and destpeer.local() is None:
1099 destpeer.close()
1100 destpeer.close()
1100 return srcpeer, destpeer
1101 return srcpeer, destpeer
1101
1102
1102
1103
1103 def _showstats(repo, stats, quietempty=False):
1104 def _showstats(repo, stats, quietempty=False):
1104 if quietempty and stats.isempty():
1105 if quietempty and stats.isempty():
1105 return
1106 return
1106 repo.ui.status(
1107 repo.ui.status(
1107 _(
1108 _(
1108 b"%d files updated, %d files merged, "
1109 b"%d files updated, %d files merged, "
1109 b"%d files removed, %d files unresolved\n"
1110 b"%d files removed, %d files unresolved\n"
1110 )
1111 )
1111 % (
1112 % (
1112 stats.updatedcount,
1113 stats.updatedcount,
1113 stats.mergedcount,
1114 stats.mergedcount,
1114 stats.removedcount,
1115 stats.removedcount,
1115 stats.unresolvedcount,
1116 stats.unresolvedcount,
1116 )
1117 )
1117 )
1118 )
1118
1119
1119
1120
1120 def updaterepo(repo, node, overwrite, updatecheck=None):
1121 def updaterepo(repo, node, overwrite, updatecheck=None):
1121 """Update the working directory to node.
1122 """Update the working directory to node.
1122
1123
1123 When overwrite is set, changes are clobbered, merged else
1124 When overwrite is set, changes are clobbered, merged else
1124
1125
1125 returns stats (see pydoc mercurial.merge.applyupdates)"""
1126 returns stats (see pydoc mercurial.merge.applyupdates)"""
1126 repo.ui.deprecwarn(
1127 repo.ui.deprecwarn(
1127 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1128 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1128 b'5.7',
1129 b'5.7',
1129 )
1130 )
1130 return mergemod._update(
1131 return mergemod._update(
1131 repo,
1132 repo,
1132 node,
1133 node,
1133 branchmerge=False,
1134 branchmerge=False,
1134 force=overwrite,
1135 force=overwrite,
1135 labels=[b'working copy', b'destination'],
1136 labels=[b'working copy', b'destination'],
1136 updatecheck=updatecheck,
1137 updatecheck=updatecheck,
1137 )
1138 )
1138
1139
1139
1140
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node``.

    Returns True when unresolved conflicts remain, False otherwise.
    """
    result = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, result, quietempty)
    unresolved = result.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1147
1148
1148
1149
# `clone()` defines a local name `update`, so keep a module-level alias.
_update = update
1151
1152
1152
1153
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, clobbering changes.

    Always returns False: a clean update discards local modifications, so
    no unresolved conflicts are possible.
    """
    update_stats = mergemod.clean_update(repo[node])
    # Local changes are thrown away, so nothing can be left unresolved.
    assert update_stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, update_stats, quietempty)
    return False
1160
1161
1161
1162
# `updatetotally()` uses `clean` as a parameter name, hence this alias.
_clean = clean

# Update-check modes accepted by `updatetotally()`.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1171
1172
1172
1173
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # Not configured, or an invalid value configured: use the default.
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # A caller-supplied mode must be one of the known constants.
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # No explicit destination: let destutil pick one, possibly along
            # with a bookmark to move and a name to activate afterwards.
            checkout, movemarkfrom, brev = destutil.destupdate(repo, clean=clean)
            warndest = True

        if clean:
            hasconflict = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # Bail out up front on a dirty working directory, then let
                # the actual update run unchecked.
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            hasconflict = _update(repo, checkout, updatecheck=updatecheck)

        if not hasconflict and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                label = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % label)
            else:
                # this can happen with a non-linear update
                label = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % label)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                label = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % label)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                label = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % label)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return hasconflict
1251
1252
1252
1253
def merge(ctx, force=False, remind=True, labels=None):
    """Branch merge with ``ctx``, resolving changes.

    Returns True if any unresolved conflicts remain.
    """
    repo = ctx.repo()
    merge_stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, merge_stats)
    unresolved = merge_stats.unresolvedcount
    if unresolved:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved > 0
1274
1275
1275
1276
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the pre-merge state."""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # Conflicts were recorded: return to the local side of the merge.
        node = ms.localctx.hex()
    else:
        # No conflicts, so no mergestate was stored; stay on the parent.
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # A clean update cannot leave unresolved files behind.
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1289
1290
1290
1291
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    srcs = list(urlutil.get_pull_paths(repo, ui, [source]))
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        display_url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            peer_path = display_url = bytes(subpath)
        else:
            joined = urlutil.url(path.loc)
            # Local paths obey OS path rules; remote URLs are always POSIX.
            if joined.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            joined.path = normpath(b'%s/%s' % (joined.path, subpath))
            peer_path = display_url = bytes(joined)
    other = peer(repo, opts, peer_path)
    cleanup = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(display_url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may wrap the peer in a bundlerepo and hand back a
        # new cleanup callable that also removes the temporary bundle.
        other, chlist, cleanup = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanup()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1357
1358
1358
1359
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``."""

    def subreporecurse():
        # Recurse into subrepositories; the overall exit code is the
        # minimum, so any subrepo with incoming changes yields 0.
        ret = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for sub_path in sorted(wctx.substate):
                sub = wctx.sub(sub_path)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skip_merges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if skip_merges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1388
1389
1389
1390
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute revisions missing from every destination in ``dests``.

    Returns ``(outgoing_revs, peers)`` where ``outgoing_revs`` is sorted by
    local revision number and ``peers`` are the opened remote peers (owned
    by the caller, who must close them).
    """
    out = set()
    peers = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            # NOTE: `subpath` deliberately keeps the parsed url object
            # across loop iterations (historical behavior).
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                base = urlutil.url(dest)
                # Local paths obey OS path rules; remote URLs are POSIX.
                if base.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                base.path = normpath(b'%s/%s' % (base.path, subpath))
                dest = bytes(base)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            missing = outgoing.missing
            out.update(missing)
            if not missing:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            peers.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = sorted(out, key=repo.changelog.rev)
    return outgoing_revs, peers
1432
1433
1433
1434
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on each subrepository; return the minimum exit code."""
    ret = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for sub_path in sorted(wctx.substate):
            sub = wctx.sub(sub_path)
            ret = min(ret, sub.outgoing(ui, dests, opts))
    return ret
1442
1443
1443
1444
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # Nothing to filter: pass everything through unchanged.
        yield from revs
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1465
1466
1466
1467
def outgoing(ui, repo, dests, opts, subpath=None):
    """Print changesets not present in the destination(s).

    Returns 0 when outgoing changes were found (here or in a subrepo),
    1 otherwise.
    """
    use_graph = opts.get(b'graph')
    if use_graph:
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    try:
        ret = 0 if o else 1

        if use_graph:
            revdag = logcmdutil.graphrevs(repo, o, opts)
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(
                ui, repo, opts, buffered=True
            )
            logcmdutil.displaygraph(
                ui, repo, revdag, displayer, graphmod.asciiedges
            )
        else:
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            for node in _outgoing_filter(repo, o, opts):
                displayer.show(repo[node])
            displayer.close()
        for remote in others:
            cmdutil.outgoinghooks(ui, repo, remote, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # The peers returned by _outgoing are owned here; always close them.
        for remote in others:
            remote.close()
1498
1499
1499
1500
def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # Keep the first failure (non-zero ret) sticky while
                        # still verifying every subrepo.
                        subresult = ctx.sub(subpath, allowcreate=False).verify()
                        ret = subresult or ret
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1532
1533
1533
1534
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # `src` looks like a repository: start from its base (global)
        # configuration and read remaining options from the repo-level ui.
        dst = src.baseui.copy()
        src = src.ui
    else:
        # Assume `src` is already a global ui object.
        dst = src.copy()

    # ssh-specific options, from the command line or the source config
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # bundle-specific options
    mainroot = src.config(b'bundle', b'mainreporoot')
    if mainroot:
        dst.setconfig(b'bundle', b'mainreporoot', mainroot, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1562
1563
1563
1564
# Files of interest: the mtime and size of these files are used to detect
# whether a cached repository instance has become stale (see cachedlocalrepo).
foi = [
    (b'spath', b'00changelog.i'),
    # ! phaseroots can change content while keeping the same size
    (b'spath', b'phaseroots'),
    (b'spath', b'obsstore'),
    # ! bookmarks can change content while keeping the same size
    (b'path', b'bookmarks'),
]
1573
1574
1574
1575
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # Staleness is detected by comparing mtimes and sizes of a few
        # well-known files (see `foi`). This is inexact: mtimes suffer from
        # clock skew and filesystem granularity, and content can change
        # without the size changing.
        current_state, current_mtime = self._repostate()
        if current_state == self._state:
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = current_state
        self.mtime = current_mtime

        return self._repo, True

    def _repostate(self):
        """Return ``((mtime, size), ...)`` for the files of interest, plus
        the newest mtime observed among them."""
        snapshot = []
        newest = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            target = os.path.join(prefix, fname)
            try:
                st = os.stat(target)
            except OSError:
                # File missing: fall back to the containing directory.
                st = os.stat(prefix)
            mtime = st[stat.ST_MTIME]
            snapshot.append((mtime, st.st_size))
            newest = max(newest, mtime)

        return tuple(snapshot), newest

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should
        be completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        duplicate = cachedlocalrepo(repo)
        duplicate._state = self._state
        duplicate.mtime = self.mtime
        return duplicate
General Comments 0
You need to be logged in to leave comments. Login now