peer-or-repo: build a repo directly in the `repo` function...
marmoute - r50588:ebb5e38f default
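
The change, in short: repository() now picks its handler class straight from the URL scheme (looking it up in repo_schemes, refusing peer-only schemes, and falling back to LocalFactory) instead of routing through _peerorrepo() and unwrapping a peer. A rough sketch of that dispatch follows; the helper name is hypothetical, and it uses str paths and the stdlib urlparse in place of Mercurial's byte strings, urlutil.url() and error.Abort:

    from urllib.parse import urlparse

    def resolve_repo_handler(path, repo_schemes, peer_schemes, local_factory):
        # Pick the module/factory used to open `path` as a local repository.
        scheme = urlparse(path).scheme or 'file'
        handler = repo_schemes.get(scheme)
        if handler is None:
            if scheme in peer_schemes:
                # Peer-only schemes (http, https, ssh, static-http) are not local.
                raise ValueError("repository '%s' is not local" % path)
            handler = local_factory
        return handler

In the diff below, repo_schemes covers bundle, union and file, peer_schemes covers http, https, ssh and static-http, and the chosen class's instance(...) call is followed by _setup_repo_or_peer(...).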
@@ -1,1643 +1,1646 @@
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
62 release = lock.release
62 release = lock.release
63
63
64 # shared features
64 # shared features
65 sharedbookmarks = b'bookmarks'
65 sharedbookmarks = b'bookmarks'
66
66
67
67
68 def addbranchrevs(lrepo, other, branches, revs):
68 def addbranchrevs(lrepo, other, branches, revs):
69 peer = other.peer() # a courtesy to callers using a localrepo for other
69 peer = other.peer() # a courtesy to callers using a localrepo for other
70 hashbranch, branches = branches
70 hashbranch, branches = branches
71 if not hashbranch and not branches:
71 if not hashbranch and not branches:
72 x = revs or None
72 x = revs or None
73 if revs:
73 if revs:
74 y = revs[0]
74 y = revs[0]
75 else:
75 else:
76 y = None
76 y = None
77 return x, y
77 return x, y
78 if revs:
78 if revs:
79 revs = list(revs)
79 revs = list(revs)
80 else:
80 else:
81 revs = []
81 revs = []
82
82
83 if not peer.capable(b'branchmap'):
83 if not peer.capable(b'branchmap'):
84 if branches:
84 if branches:
85 raise error.Abort(_(b"remote branch lookup not supported"))
85 raise error.Abort(_(b"remote branch lookup not supported"))
86 revs.append(hashbranch)
86 revs.append(hashbranch)
87 return revs, revs[0]
87 return revs, revs[0]
88
88
89 with peer.commandexecutor() as e:
89 with peer.commandexecutor() as e:
90 branchmap = e.callcommand(b'branchmap', {}).result()
90 branchmap = e.callcommand(b'branchmap', {}).result()
91
91
92 def primary(branch):
92 def primary(branch):
93 if branch == b'.':
93 if branch == b'.':
94 if not lrepo:
94 if not lrepo:
95 raise error.Abort(_(b"dirstate branch not accessible"))
95 raise error.Abort(_(b"dirstate branch not accessible"))
96 branch = lrepo.dirstate.branch()
96 branch = lrepo.dirstate.branch()
97 if branch in branchmap:
97 if branch in branchmap:
98 revs.extend(hex(r) for r in reversed(branchmap[branch]))
98 revs.extend(hex(r) for r in reversed(branchmap[branch]))
99 return True
99 return True
100 else:
100 else:
101 return False
101 return False
102
102
103 for branch in branches:
103 for branch in branches:
104 if not primary(branch):
104 if not primary(branch):
105 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
105 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
106 if hashbranch:
106 if hashbranch:
107 if not primary(hashbranch):
107 if not primary(hashbranch):
108 revs.append(hashbranch)
108 revs.append(hashbranch)
109 return revs, revs[0]
109 return revs, revs[0]
110
110
111
111
112 def _isfile(path):
112 def _isfile(path):
113 try:
113 try:
114 # we use os.stat() directly here instead of os.path.isfile()
114 # we use os.stat() directly here instead of os.path.isfile()
115 # because the latter started returning `False` on invalid path
115 # because the latter started returning `False` on invalid path
116 # exceptions starting in 3.8 and we care about handling
116 # exceptions starting in 3.8 and we care about handling
117 # invalid paths specially here.
117 # invalid paths specially here.
118 st = os.stat(path)
118 st = os.stat(path)
119 except ValueError as e:
119 except ValueError as e:
120 msg = stringutil.forcebytestr(e)
120 msg = stringutil.forcebytestr(e)
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
122 except OSError:
122 except OSError:
123 return False
123 return False
124 else:
124 else:
125 return stat.S_ISREG(st.st_mode)
125 return stat.S_ISREG(st.st_mode)
126
126
127
127
128 class LocalFactory:
128 class LocalFactory:
129 """thin wrapper to dispatch between localrepo and bundle repo"""
129 """thin wrapper to dispatch between localrepo and bundle repo"""
130
130
131 @staticmethod
131 @staticmethod
132 def islocal(path: bytes) -> bool:
132 def islocal(path: bytes) -> bool:
133 path = util.expandpath(urlutil.urllocalpath(path))
133 path = util.expandpath(urlutil.urllocalpath(path))
134 return not _isfile(path)
134 return not _isfile(path)
135
135
136 @staticmethod
136 @staticmethod
137 def instance(ui, path, *args, **kwargs):
137 def instance(ui, path, *args, **kwargs):
138 path = util.expandpath(urlutil.urllocalpath(path))
138 path = util.expandpath(urlutil.urllocalpath(path))
139 if _isfile(path):
139 if _isfile(path):
140 cls = bundlerepo
140 cls = bundlerepo
141 else:
141 else:
142 cls = localrepo
142 cls = localrepo
143 return cls.instance(ui, path, *args, **kwargs)
143 return cls.instance(ui, path, *args, **kwargs)
144
144
145
145
146 repo_schemes = {
146 repo_schemes = {
147 b'bundle': bundlerepo,
147 b'bundle': bundlerepo,
148 b'union': unionrepo,
148 b'union': unionrepo,
149 b'file': LocalFactory,
149 b'file': LocalFactory,
150 }
150 }
151
151
152 peer_schemes = {
152 peer_schemes = {
153 b'http': httppeer,
153 b'http': httppeer,
154 b'https': httppeer,
154 b'https': httppeer,
155 b'ssh': sshpeer,
155 b'ssh': sshpeer,
156 b'static-http': statichttprepo,
156 b'static-http': statichttprepo,
157 }
157 }
158
158
159
159
160 def _peerlookup(path):
160 def _peerlookup(path):
161 u = urlutil.url(path)
161 u = urlutil.url(path)
162 scheme = u.scheme or b'file'
162 scheme = u.scheme or b'file'
163 if scheme in peer_schemes:
163 if scheme in peer_schemes:
164 return peer_schemes[scheme]
164 return peer_schemes[scheme]
165 if scheme in repo_schemes:
165 if scheme in repo_schemes:
166 return repo_schemes[scheme]
166 return repo_schemes[scheme]
167 return LocalFactory
167 return LocalFactory
168
168
169
169
170 def islocal(repo):
170 def islocal(repo):
171 '''return true if repo (or path pointing to repo) is local'''
171 '''return true if repo (or path pointing to repo) is local'''
172 if isinstance(repo, bytes):
172 if isinstance(repo, bytes):
173 cls = _peerlookup(repo)
173 cls = _peerlookup(repo)
174 cls.instance # make sure we load the module
174 cls.instance # make sure we load the module
175 if util.safehasattr(cls, 'islocal'):
175 if util.safehasattr(cls, 'islocal'):
176 return cls.islocal(repo) # pytype: disable=module-attr
176 return cls.islocal(repo) # pytype: disable=module-attr
177 return False
177 return False
178 repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
178 repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
179 return repo.local()
179 return repo.local()
180
180
181
181
182 def openpath(ui, path, sendaccept=True):
182 def openpath(ui, path, sendaccept=True):
183 '''open path with open if local, url.open if remote'''
183 '''open path with open if local, url.open if remote'''
184 pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
184 pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
185 if pathurl.islocal():
185 if pathurl.islocal():
186 return util.posixfile(pathurl.localpath(), b'rb')
186 return util.posixfile(pathurl.localpath(), b'rb')
187 else:
187 else:
188 return url.open(ui, path, sendaccept=sendaccept)
188 return url.open(ui, path, sendaccept=sendaccept)
189
189
190
190
191 # a list of (ui, repo) functions called for wire peer initialization
191 # a list of (ui, repo) functions called for wire peer initialization
192 wirepeersetupfuncs = []
192 wirepeersetupfuncs = []
193
193
194
194
195 def _peerorrepo(
195 def _peerorrepo(
196 ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
196 ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
197 ):
197 ):
198 """return a repository object for the specified path"""
198 """return a repository object for the specified path"""
199 cls = _peerlookup(path)
199 cls = _peerlookup(path)
200 obj = cls.instance(ui, path, create, intents=intents, createopts=createopts)
200 obj = cls.instance(ui, path, create, intents=intents, createopts=createopts)
201 _setup_repo_or_peer(ui, obj, presetupfuncs)
201 _setup_repo_or_peer(ui, obj, presetupfuncs)
202 return obj
202 return obj
203
203
204
204
205 def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
205 def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
206 ui = getattr(obj, "ui", ui)
206 ui = getattr(obj, "ui", ui)
207 for f in presetupfuncs or []:
207 for f in presetupfuncs or []:
208 f(ui, obj)
208 f(ui, obj)
209 ui.log(b'extension', b'- executing reposetup hooks\n')
209 ui.log(b'extension', b'- executing reposetup hooks\n')
210 with util.timedcm('all reposetup') as allreposetupstats:
210 with util.timedcm('all reposetup') as allreposetupstats:
211 for name, module in extensions.extensions(ui):
211 for name, module in extensions.extensions(ui):
212 ui.log(b'extension', b' - running reposetup for %s\n', name)
212 ui.log(b'extension', b' - running reposetup for %s\n', name)
213 hook = getattr(module, 'reposetup', None)
213 hook = getattr(module, 'reposetup', None)
214 if hook:
214 if hook:
215 with util.timedcm('reposetup %r', name) as stats:
215 with util.timedcm('reposetup %r', name) as stats:
216 hook(ui, obj)
216 hook(ui, obj)
217 msg = b' > reposetup for %s took %s\n'
217 msg = b' > reposetup for %s took %s\n'
218 ui.log(b'extension', msg, name, stats)
218 ui.log(b'extension', msg, name, stats)
219 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
219 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
220 if not obj.local():
220 if not obj.local():
221 for f in wirepeersetupfuncs:
221 for f in wirepeersetupfuncs:
222 f(ui, obj)
222 f(ui, obj)
223
223
224
224
 def repository(
     ui,
     path=b'',
     create=False,
     presetupfuncs=None,
     intents=None,
     createopts=None,
 ):
     """return a repository object for the specified path"""
-    peer = _peerorrepo(
+    scheme = urlutil.url(path).scheme
+    if scheme is None:
+        scheme = b'file'
+    cls = repo_schemes.get(scheme)
+    if cls is None:
+        if scheme in peer_schemes:
+            raise error.Abort(_(b"repository '%s' is not local") % path)
+        cls = LocalFactory
+    repo = cls.instance(
         ui,
         path,
         create,
-        presetupfuncs=presetupfuncs,
         intents=intents,
         createopts=createopts,
     )
-    repo = peer.local()
-    if not repo:
-        raise error.Abort(
-            _(b"repository '%s' is not local") % (path or peer.url())
-        )
+    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
     return repo.filtered(b'visible')
248
251
249
252
250 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
253 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
251 '''return a repository peer for the specified path'''
254 '''return a repository peer for the specified path'''
252 rui = remoteui(uiorrepo, opts)
255 rui = remoteui(uiorrepo, opts)
253 scheme = urlutil.url(path).scheme
256 scheme = urlutil.url(path).scheme
254 if scheme in peer_schemes:
257 if scheme in peer_schemes:
255 cls = peer_schemes[scheme]
258 cls = peer_schemes[scheme]
256 peer = cls.instance(
259 peer = cls.instance(
257 rui,
260 rui,
258 path,
261 path,
259 create,
262 create,
260 intents=intents,
263 intents=intents,
261 createopts=createopts,
264 createopts=createopts,
262 )
265 )
263 _setup_repo_or_peer(rui, peer)
266 _setup_repo_or_peer(rui, peer)
264 else:
267 else:
265 # this is a repository
268 # this is a repository
266 repo = repository(
269 repo = repository(
267 rui,
270 rui,
268 path,
271 path,
269 create,
272 create,
270 intents=intents,
273 intents=intents,
271 createopts=createopts,
274 createopts=createopts,
272 )
275 )
273 peer = repo.peer()
276 peer = repo.peer()
274 return peer
277 return peer
275
278
276
279
277 def defaultdest(source):
280 def defaultdest(source):
278 """return default destination of clone if none is given
281 """return default destination of clone if none is given
279
282
280 >>> defaultdest(b'foo')
283 >>> defaultdest(b'foo')
281 'foo'
284 'foo'
282 >>> defaultdest(b'/foo/bar')
285 >>> defaultdest(b'/foo/bar')
283 'bar'
286 'bar'
284 >>> defaultdest(b'/')
287 >>> defaultdest(b'/')
285 ''
288 ''
286 >>> defaultdest(b'')
289 >>> defaultdest(b'')
287 ''
290 ''
288 >>> defaultdest(b'http://example.org/')
291 >>> defaultdest(b'http://example.org/')
289 ''
292 ''
290 >>> defaultdest(b'http://example.org/foo/')
293 >>> defaultdest(b'http://example.org/foo/')
291 'foo'
294 'foo'
292 """
295 """
293 path = urlutil.url(source).path
296 path = urlutil.url(source).path
294 if not path:
297 if not path:
295 return b''
298 return b''
296 return os.path.basename(os.path.normpath(path))
299 return os.path.basename(os.path.normpath(path))
297
300
298
301
299 def sharedreposource(repo):
302 def sharedreposource(repo):
300 """Returns repository object for source repository of a shared repo.
303 """Returns repository object for source repository of a shared repo.
301
304
302 If repo is not a shared repository, returns None.
305 If repo is not a shared repository, returns None.
303 """
306 """
304 if repo.sharedpath == repo.path:
307 if repo.sharedpath == repo.path:
305 return None
308 return None
306
309
307 if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
310 if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
308 return repo.srcrepo
311 return repo.srcrepo
309
312
310 # the sharedpath always ends in the .hg; we want the path to the repo
313 # the sharedpath always ends in the .hg; we want the path to the repo
311 source = repo.vfs.split(repo.sharedpath)[0]
314 source = repo.vfs.split(repo.sharedpath)[0]
312 srcurl, branches = urlutil.parseurl(source)
315 srcurl, branches = urlutil.parseurl(source)
313 srcrepo = repository(repo.ui, srcurl)
316 srcrepo = repository(repo.ui, srcurl)
314 repo.srcrepo = srcrepo
317 repo.srcrepo = srcrepo
315 return srcrepo
318 return srcrepo
316
319
317
320
318 def share(
321 def share(
319 ui,
322 ui,
320 source,
323 source,
321 dest=None,
324 dest=None,
322 update=True,
325 update=True,
323 bookmarks=True,
326 bookmarks=True,
324 defaultpath=None,
327 defaultpath=None,
325 relative=False,
328 relative=False,
326 ):
329 ):
327 '''create a shared repository'''
330 '''create a shared repository'''
328
331
329 not_local_msg = _(b'can only share local repositories')
332 not_local_msg = _(b'can only share local repositories')
330 if util.safehasattr(source, 'local'):
333 if util.safehasattr(source, 'local'):
331 if source.local() is None:
334 if source.local() is None:
332 raise error.Abort(not_local_msg)
335 raise error.Abort(not_local_msg)
333 elif not islocal(source):
336 elif not islocal(source):
334 # XXX why are we getting bytes here ?
337 # XXX why are we getting bytes here ?
335 raise error.Abort(not_local_msg)
338 raise error.Abort(not_local_msg)
336
339
337 if not dest:
340 if not dest:
338 dest = defaultdest(source)
341 dest = defaultdest(source)
339 else:
342 else:
340 dest = urlutil.get_clone_path(ui, dest)[1]
343 dest = urlutil.get_clone_path(ui, dest)[1]
341
344
342 if isinstance(source, bytes):
345 if isinstance(source, bytes):
343 origsource, source, branches = urlutil.get_clone_path(ui, source)
346 origsource, source, branches = urlutil.get_clone_path(ui, source)
344 srcrepo = repository(ui, source)
347 srcrepo = repository(ui, source)
345 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
348 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
346 else:
349 else:
347 srcrepo = source.local()
350 srcrepo = source.local()
348 checkout = None
351 checkout = None
349
352
350 shareditems = set()
353 shareditems = set()
351 if bookmarks:
354 if bookmarks:
352 shareditems.add(sharedbookmarks)
355 shareditems.add(sharedbookmarks)
353
356
354 r = repository(
357 r = repository(
355 ui,
358 ui,
356 dest,
359 dest,
357 create=True,
360 create=True,
358 createopts={
361 createopts={
359 b'sharedrepo': srcrepo,
362 b'sharedrepo': srcrepo,
360 b'sharedrelative': relative,
363 b'sharedrelative': relative,
361 b'shareditems': shareditems,
364 b'shareditems': shareditems,
362 },
365 },
363 )
366 )
364
367
365 postshare(srcrepo, r, defaultpath=defaultpath)
368 postshare(srcrepo, r, defaultpath=defaultpath)
366 r = repository(ui, dest)
369 r = repository(ui, dest)
367 _postshareupdate(r, update, checkout=checkout)
370 _postshareupdate(r, update, checkout=checkout)
368 return r
371 return r
369
372
370
373
371 def _prependsourcehgrc(repo):
374 def _prependsourcehgrc(repo):
372 """copies the source repo config and prepend it in current repo .hg/hgrc
375 """copies the source repo config and prepend it in current repo .hg/hgrc
373 on unshare. This is only done if the share was perfomed using share safe
376 on unshare. This is only done if the share was perfomed using share safe
374 method where we share config of source in shares"""
377 method where we share config of source in shares"""
375 srcvfs = vfsmod.vfs(repo.sharedpath)
378 srcvfs = vfsmod.vfs(repo.sharedpath)
376 dstvfs = vfsmod.vfs(repo.path)
379 dstvfs = vfsmod.vfs(repo.path)
377
380
378 if not srcvfs.exists(b'hgrc'):
381 if not srcvfs.exists(b'hgrc'):
379 return
382 return
380
383
381 currentconfig = b''
384 currentconfig = b''
382 if dstvfs.exists(b'hgrc'):
385 if dstvfs.exists(b'hgrc'):
383 currentconfig = dstvfs.read(b'hgrc')
386 currentconfig = dstvfs.read(b'hgrc')
384
387
385 with dstvfs(b'hgrc', b'wb') as fp:
388 with dstvfs(b'hgrc', b'wb') as fp:
386 sourceconfig = srcvfs.read(b'hgrc')
389 sourceconfig = srcvfs.read(b'hgrc')
387 fp.write(b"# Config copied from shared source\n")
390 fp.write(b"# Config copied from shared source\n")
388 fp.write(sourceconfig)
391 fp.write(sourceconfig)
389 fp.write(b'\n')
392 fp.write(b'\n')
390 fp.write(currentconfig)
393 fp.write(currentconfig)
391
394
392
395
393 def unshare(ui, repo):
396 def unshare(ui, repo):
394 """convert a shared repository to a normal one
397 """convert a shared repository to a normal one
395
398
396 Copy the store data to the repo and remove the sharedpath data.
399 Copy the store data to the repo and remove the sharedpath data.
397
400
398 Returns a new repository object representing the unshared repository.
401 Returns a new repository object representing the unshared repository.
399
402
400 The passed repository object is not usable after this function is
403 The passed repository object is not usable after this function is
401 called.
404 called.
402 """
405 """
403
406
404 with repo.lock():
407 with repo.lock():
405 # we use locks here because if we race with commit, we
408 # we use locks here because if we race with commit, we
406 # can end up with extra data in the cloned revlogs that's
409 # can end up with extra data in the cloned revlogs that's
407 # not pointed to by changesets, thus causing verify to
410 # not pointed to by changesets, thus causing verify to
408 # fail
411 # fail
409 destlock = copystore(ui, repo, repo.path)
412 destlock = copystore(ui, repo, repo.path)
410 with destlock or util.nullcontextmanager():
413 with destlock or util.nullcontextmanager():
411 if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
414 if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
412 # we were sharing .hg/hgrc of the share source with the current
415 # we were sharing .hg/hgrc of the share source with the current
413 # repo. We need to copy that while unsharing otherwise it can
416 # repo. We need to copy that while unsharing otherwise it can
414 # disable hooks and other checks
417 # disable hooks and other checks
415 _prependsourcehgrc(repo)
418 _prependsourcehgrc(repo)
416
419
417 sharefile = repo.vfs.join(b'sharedpath')
420 sharefile = repo.vfs.join(b'sharedpath')
418 util.rename(sharefile, sharefile + b'.old')
421 util.rename(sharefile, sharefile + b'.old')
419
422
420 repo.requirements.discard(requirements.SHARED_REQUIREMENT)
423 repo.requirements.discard(requirements.SHARED_REQUIREMENT)
421 repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
424 repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
422 scmutil.writereporequirements(repo)
425 scmutil.writereporequirements(repo)
423
426
424 # Removing share changes some fundamental properties of the repo instance.
427 # Removing share changes some fundamental properties of the repo instance.
425 # So we instantiate a new repo object and operate on it rather than
428 # So we instantiate a new repo object and operate on it rather than
426 # try to keep the existing repo usable.
429 # try to keep the existing repo usable.
427 newrepo = repository(repo.baseui, repo.root, create=False)
430 newrepo = repository(repo.baseui, repo.root, create=False)
428
431
429 # TODO: figure out how to access subrepos that exist, but were previously
432 # TODO: figure out how to access subrepos that exist, but were previously
430 # removed from .hgsub
433 # removed from .hgsub
431 c = newrepo[b'.']
434 c = newrepo[b'.']
432 subs = c.substate
435 subs = c.substate
433 for s in sorted(subs):
436 for s in sorted(subs):
434 c.sub(s).unshare()
437 c.sub(s).unshare()
435
438
436 localrepo.poisonrepository(repo)
439 localrepo.poisonrepository(repo)
437
440
438 return newrepo
441 return newrepo
439
442
440
443
441 def postshare(sourcerepo, destrepo, defaultpath=None):
444 def postshare(sourcerepo, destrepo, defaultpath=None):
442 """Called after a new shared repo is created.
445 """Called after a new shared repo is created.
443
446
444 The new repo only has a requirements file and pointer to the source.
447 The new repo only has a requirements file and pointer to the source.
445 This function configures additional shared data.
448 This function configures additional shared data.
446
449
447 Extensions can wrap this function and write additional entries to
450 Extensions can wrap this function and write additional entries to
448 destrepo/.hg/shared to indicate additional pieces of data to be shared.
451 destrepo/.hg/shared to indicate additional pieces of data to be shared.
449 """
452 """
450 default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
453 default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
451 if default:
454 if default:
452 template = b'[paths]\ndefault = %s\n'
455 template = b'[paths]\ndefault = %s\n'
453 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
456 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
454 if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
457 if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
455 with destrepo.wlock():
458 with destrepo.wlock():
456 narrowspec.copytoworkingcopy(destrepo)
459 narrowspec.copytoworkingcopy(destrepo)
457
460
458
461
459 def _postshareupdate(repo, update, checkout=None):
462 def _postshareupdate(repo, update, checkout=None):
460 """Maybe perform a working directory update after a shared repo is created.
463 """Maybe perform a working directory update after a shared repo is created.
461
464
462 ``update`` can be a boolean or a revision to update to.
465 ``update`` can be a boolean or a revision to update to.
463 """
466 """
464 if not update:
467 if not update:
465 return
468 return
466
469
467 repo.ui.status(_(b"updating working directory\n"))
470 repo.ui.status(_(b"updating working directory\n"))
468 if update is not True:
471 if update is not True:
469 checkout = update
472 checkout = update
470 for test in (checkout, b'default', b'tip'):
473 for test in (checkout, b'default', b'tip'):
471 if test is None:
474 if test is None:
472 continue
475 continue
473 try:
476 try:
474 uprev = repo.lookup(test)
477 uprev = repo.lookup(test)
475 break
478 break
476 except error.RepoLookupError:
479 except error.RepoLookupError:
477 continue
480 continue
478 _update(repo, uprev)
481 _update(repo, uprev)
479
482
480
483
481 def copystore(ui, srcrepo, destpath):
484 def copystore(ui, srcrepo, destpath):
482 """copy files from store of srcrepo in destpath
485 """copy files from store of srcrepo in destpath
483
486
484 returns destlock
487 returns destlock
485 """
488 """
486 destlock = None
489 destlock = None
487 try:
490 try:
488 hardlink = None
491 hardlink = None
489 topic = _(b'linking') if hardlink else _(b'copying')
492 topic = _(b'linking') if hardlink else _(b'copying')
490 with ui.makeprogress(topic, unit=_(b'files')) as progress:
493 with ui.makeprogress(topic, unit=_(b'files')) as progress:
491 num = 0
494 num = 0
492 srcpublishing = srcrepo.publishing()
495 srcpublishing = srcrepo.publishing()
493 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
496 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
494 dstvfs = vfsmod.vfs(destpath)
497 dstvfs = vfsmod.vfs(destpath)
495 for f in srcrepo.store.copylist():
498 for f in srcrepo.store.copylist():
496 if srcpublishing and f.endswith(b'phaseroots'):
499 if srcpublishing and f.endswith(b'phaseroots'):
497 continue
500 continue
498 dstbase = os.path.dirname(f)
501 dstbase = os.path.dirname(f)
499 if dstbase and not dstvfs.exists(dstbase):
502 if dstbase and not dstvfs.exists(dstbase):
500 dstvfs.mkdir(dstbase)
503 dstvfs.mkdir(dstbase)
501 if srcvfs.exists(f):
504 if srcvfs.exists(f):
502 if f.endswith(b'data'):
505 if f.endswith(b'data'):
503 # 'dstbase' may be empty (e.g. revlog format 0)
506 # 'dstbase' may be empty (e.g. revlog format 0)
504 lockfile = os.path.join(dstbase, b"lock")
507 lockfile = os.path.join(dstbase, b"lock")
505 # lock to avoid premature writing to the target
508 # lock to avoid premature writing to the target
506 destlock = lock.lock(dstvfs, lockfile)
509 destlock = lock.lock(dstvfs, lockfile)
507 hardlink, n = util.copyfiles(
510 hardlink, n = util.copyfiles(
508 srcvfs.join(f), dstvfs.join(f), hardlink, progress
511 srcvfs.join(f), dstvfs.join(f), hardlink, progress
509 )
512 )
510 num += n
513 num += n
511 if hardlink:
514 if hardlink:
512 ui.debug(b"linked %d files\n" % num)
515 ui.debug(b"linked %d files\n" % num)
513 else:
516 else:
514 ui.debug(b"copied %d files\n" % num)
517 ui.debug(b"copied %d files\n" % num)
515 return destlock
518 return destlock
516 except: # re-raises
519 except: # re-raises
517 release(destlock)
520 release(destlock)
518 raise
521 raise
519
522
520
523
521 def clonewithshare(
524 def clonewithshare(
522 ui,
525 ui,
523 peeropts,
526 peeropts,
524 sharepath,
527 sharepath,
525 source,
528 source,
526 srcpeer,
529 srcpeer,
527 dest,
530 dest,
528 pull=False,
531 pull=False,
529 rev=None,
532 rev=None,
530 update=True,
533 update=True,
531 stream=False,
534 stream=False,
532 ):
535 ):
533 """Perform a clone using a shared repo.
536 """Perform a clone using a shared repo.
534
537
535 The store for the repository will be located at <sharepath>/.hg. The
538 The store for the repository will be located at <sharepath>/.hg. The
536 specified revisions will be cloned or pulled from "source". A shared repo
539 specified revisions will be cloned or pulled from "source". A shared repo
537 will be created at "dest" and a working copy will be created if "update" is
540 will be created at "dest" and a working copy will be created if "update" is
538 True.
541 True.
539 """
542 """
540 revs = None
543 revs = None
541 if rev:
544 if rev:
542 if not srcpeer.capable(b'lookup'):
545 if not srcpeer.capable(b'lookup'):
543 raise error.Abort(
546 raise error.Abort(
544 _(
547 _(
545 b"src repository does not support "
548 b"src repository does not support "
546 b"revision lookup and so doesn't "
549 b"revision lookup and so doesn't "
547 b"support clone by revision"
550 b"support clone by revision"
548 )
551 )
549 )
552 )
550
553
551 # TODO this is batchable.
554 # TODO this is batchable.
552 remoterevs = []
555 remoterevs = []
553 for r in rev:
556 for r in rev:
554 with srcpeer.commandexecutor() as e:
557 with srcpeer.commandexecutor() as e:
555 remoterevs.append(
558 remoterevs.append(
556 e.callcommand(
559 e.callcommand(
557 b'lookup',
560 b'lookup',
558 {
561 {
559 b'key': r,
562 b'key': r,
560 },
563 },
561 ).result()
564 ).result()
562 )
565 )
563 revs = remoterevs
566 revs = remoterevs
564
567
565 # Obtain a lock before checking for or cloning the pooled repo otherwise
568 # Obtain a lock before checking for or cloning the pooled repo otherwise
566 # 2 clients may race creating or populating it.
569 # 2 clients may race creating or populating it.
567 pooldir = os.path.dirname(sharepath)
570 pooldir = os.path.dirname(sharepath)
568 # lock class requires the directory to exist.
571 # lock class requires the directory to exist.
569 try:
572 try:
570 util.makedir(pooldir, False)
573 util.makedir(pooldir, False)
571 except FileExistsError:
574 except FileExistsError:
572 pass
575 pass
573
576
574 poolvfs = vfsmod.vfs(pooldir)
577 poolvfs = vfsmod.vfs(pooldir)
575 basename = os.path.basename(sharepath)
578 basename = os.path.basename(sharepath)
576
579
577 with lock.lock(poolvfs, b'%s.lock' % basename):
580 with lock.lock(poolvfs, b'%s.lock' % basename):
578 if os.path.exists(sharepath):
581 if os.path.exists(sharepath):
579 ui.status(
582 ui.status(
580 _(b'(sharing from existing pooled repository %s)\n') % basename
583 _(b'(sharing from existing pooled repository %s)\n') % basename
581 )
584 )
582 else:
585 else:
583 ui.status(
586 ui.status(
584 _(b'(sharing from new pooled repository %s)\n') % basename
587 _(b'(sharing from new pooled repository %s)\n') % basename
585 )
588 )
586 # Always use pull mode because hardlinks in share mode don't work
589 # Always use pull mode because hardlinks in share mode don't work
587 # well. Never update because working copies aren't necessary in
590 # well. Never update because working copies aren't necessary in
588 # share mode.
591 # share mode.
589 clone(
592 clone(
590 ui,
593 ui,
591 peeropts,
594 peeropts,
592 source,
595 source,
593 dest=sharepath,
596 dest=sharepath,
594 pull=True,
597 pull=True,
595 revs=rev,
598 revs=rev,
596 update=False,
599 update=False,
597 stream=stream,
600 stream=stream,
598 )
601 )
599
602
600 # Resolve the value to put in [paths] section for the source.
603 # Resolve the value to put in [paths] section for the source.
601 if islocal(source):
604 if islocal(source):
602 defaultpath = util.abspath(urlutil.urllocalpath(source))
605 defaultpath = util.abspath(urlutil.urllocalpath(source))
603 else:
606 else:
604 defaultpath = source
607 defaultpath = source
605
608
606 sharerepo = repository(ui, path=sharepath)
609 sharerepo = repository(ui, path=sharepath)
607 destrepo = share(
610 destrepo = share(
608 ui,
611 ui,
609 sharerepo,
612 sharerepo,
610 dest=dest,
613 dest=dest,
611 update=False,
614 update=False,
612 bookmarks=False,
615 bookmarks=False,
613 defaultpath=defaultpath,
616 defaultpath=defaultpath,
614 )
617 )
615
618
616 # We need to perform a pull against the dest repo to fetch bookmarks
619 # We need to perform a pull against the dest repo to fetch bookmarks
617 # and other non-store data that isn't shared by default. In the case of
620 # and other non-store data that isn't shared by default. In the case of
618 # non-existing shared repo, this means we pull from the remote twice. This
621 # non-existing shared repo, this means we pull from the remote twice. This
619 # is a bit weird. But at the time it was implemented, there wasn't an easy
622 # is a bit weird. But at the time it was implemented, there wasn't an easy
620 # way to pull just non-changegroup data.
623 # way to pull just non-changegroup data.
621 exchange.pull(destrepo, srcpeer, heads=revs)
624 exchange.pull(destrepo, srcpeer, heads=revs)
622
625
623 _postshareupdate(destrepo, update)
626 _postshareupdate(destrepo, update)
624
627
625 return srcpeer, peer(ui, peeropts, dest)
628 return srcpeer, peer(ui, peeropts, dest)
626
629
627
630
628 # Recomputing caches is often slow on big repos, so copy them.
631 # Recomputing caches is often slow on big repos, so copy them.
629 def _copycache(srcrepo, dstcachedir, fname):
632 def _copycache(srcrepo, dstcachedir, fname):
630 """copy a cache from srcrepo to destcachedir (if it exists)"""
633 """copy a cache from srcrepo to destcachedir (if it exists)"""
631 srcfname = srcrepo.cachevfs.join(fname)
634 srcfname = srcrepo.cachevfs.join(fname)
632 dstfname = os.path.join(dstcachedir, fname)
635 dstfname = os.path.join(dstcachedir, fname)
633 if os.path.exists(srcfname):
636 if os.path.exists(srcfname):
634 if not os.path.exists(dstcachedir):
637 if not os.path.exists(dstcachedir):
635 os.mkdir(dstcachedir)
638 os.mkdir(dstcachedir)
636 util.copyfile(srcfname, dstfname)
639 util.copyfile(srcfname, dstfname)
637
640
638
641
639 def clone(
642 def clone(
640 ui,
643 ui,
641 peeropts,
644 peeropts,
642 source,
645 source,
643 dest=None,
646 dest=None,
644 pull=False,
647 pull=False,
645 revs=None,
648 revs=None,
646 update=True,
649 update=True,
647 stream=False,
650 stream=False,
648 branch=None,
651 branch=None,
649 shareopts=None,
652 shareopts=None,
650 storeincludepats=None,
653 storeincludepats=None,
651 storeexcludepats=None,
654 storeexcludepats=None,
652 depth=None,
655 depth=None,
653 ):
656 ):
654 """Make a copy of an existing repository.
657 """Make a copy of an existing repository.
655
658
656 Create a copy of an existing repository in a new directory. The
659 Create a copy of an existing repository in a new directory. The
657 source and destination are URLs, as passed to the repository
660 source and destination are URLs, as passed to the repository
658 function. Returns a pair of repository peers, the source and
661 function. Returns a pair of repository peers, the source and
659 newly created destination.
662 newly created destination.
660
663
661 The location of the source is added to the new repository's
664 The location of the source is added to the new repository's
662 .hg/hgrc file, as the default to be used for future pulls and
665 .hg/hgrc file, as the default to be used for future pulls and
663 pushes.
666 pushes.
664
667
665 If an exception is raised, the partly cloned/updated destination
668 If an exception is raised, the partly cloned/updated destination
666 repository will be deleted.
669 repository will be deleted.
667
670
668 Arguments:
671 Arguments:
669
672
670 source: repository object or URL
673 source: repository object or URL
671
674
672 dest: URL of destination repository to create (defaults to base
675 dest: URL of destination repository to create (defaults to base
673 name of source repository)
676 name of source repository)
674
677
675 pull: always pull from source repository, even in local case or if the
678 pull: always pull from source repository, even in local case or if the
676 server prefers streaming
679 server prefers streaming
677
680
678 stream: stream raw data uncompressed from repository (fast over
681 stream: stream raw data uncompressed from repository (fast over
679 LAN, slow over WAN)
682 LAN, slow over WAN)
680
683
681 revs: revision to clone up to (implies pull=True)
684 revs: revision to clone up to (implies pull=True)
682
685
683 update: update working directory after clone completes, if
686 update: update working directory after clone completes, if
684 destination is local repository (True means update to default rev,
687 destination is local repository (True means update to default rev,
685 anything else is treated as a revision)
688 anything else is treated as a revision)
686
689
687 branch: branches to clone
690 branch: branches to clone
688
691
689 shareopts: dict of options to control auto sharing behavior. The "pool" key
692 shareopts: dict of options to control auto sharing behavior. The "pool" key
690 activates auto sharing mode and defines the directory for stores. The
693 activates auto sharing mode and defines the directory for stores. The
691 "mode" key determines how to construct the directory name of the shared
694 "mode" key determines how to construct the directory name of the shared
692 repository. "identity" means the name is derived from the node of the first
695 repository. "identity" means the name is derived from the node of the first
693 changeset in the repository. "remote" means the name is derived from the
696 changeset in the repository. "remote" means the name is derived from the
694 remote's path/URL. Defaults to "identity."
697 remote's path/URL. Defaults to "identity."
695
698
696 storeincludepats and storeexcludepats: sets of file patterns to include and
699 storeincludepats and storeexcludepats: sets of file patterns to include and
697 exclude in the repository copy, respectively. If not defined, all files
700 exclude in the repository copy, respectively. If not defined, all files
698 will be included (a "full" clone). Otherwise a "narrow" clone containing
701 will be included (a "full" clone). Otherwise a "narrow" clone containing
699 only the requested files will be performed. If ``storeincludepats`` is not
702 only the requested files will be performed. If ``storeincludepats`` is not
700 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
703 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
701 ``path:.``. If both are empty sets, no files will be cloned.
704 ``path:.``. If both are empty sets, no files will be cloned.
702 """
705 """
703
706
704 if isinstance(source, bytes):
707 if isinstance(source, bytes):
705 src = urlutil.get_clone_path(ui, source, branch)
708 src = urlutil.get_clone_path(ui, source, branch)
706 origsource, source, branches = src
709 origsource, source, branches = src
707 srcpeer = peer(ui, peeropts, source)
710 srcpeer = peer(ui, peeropts, source)
708 else:
711 else:
709 srcpeer = source.peer() # in case we were called with a localrepo
712 srcpeer = source.peer() # in case we were called with a localrepo
710 branches = (None, branch or [])
713 branches = (None, branch or [])
711 origsource = source = srcpeer.url()
714 origsource = source = srcpeer.url()
712 srclock = destlock = destwlock = cleandir = None
715 srclock = destlock = destwlock = cleandir = None
713 destpeer = None
716 destpeer = None
714 try:
717 try:
715 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
718 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
716
719
717 if dest is None:
720 if dest is None:
718 dest = defaultdest(source)
721 dest = defaultdest(source)
719 if dest:
722 if dest:
720 ui.status(_(b"destination directory: %s\n") % dest)
723 ui.status(_(b"destination directory: %s\n") % dest)
721 else:
724 else:
722 dest = urlutil.get_clone_path(ui, dest)[0]
725 dest = urlutil.get_clone_path(ui, dest)[0]
723
726
724 dest = urlutil.urllocalpath(dest)
727 dest = urlutil.urllocalpath(dest)
725 source = urlutil.urllocalpath(source)
728 source = urlutil.urllocalpath(source)
726
729
727 if not dest:
730 if not dest:
728 raise error.InputError(_(b"empty destination path is not valid"))
731 raise error.InputError(_(b"empty destination path is not valid"))
729
732
730 destvfs = vfsmod.vfs(dest, expandpath=True)
733 destvfs = vfsmod.vfs(dest, expandpath=True)
731 if destvfs.lexists():
734 if destvfs.lexists():
732 if not destvfs.isdir():
735 if not destvfs.isdir():
733 raise error.InputError(
736 raise error.InputError(
734 _(b"destination '%s' already exists") % dest
737 _(b"destination '%s' already exists") % dest
735 )
738 )
736 elif destvfs.listdir():
739 elif destvfs.listdir():
737 raise error.InputError(
740 raise error.InputError(
738 _(b"destination '%s' is not empty") % dest
741 _(b"destination '%s' is not empty") % dest
739 )
742 )
740
743
741 createopts = {}
744 createopts = {}
742 narrow = False
745 narrow = False
743
746
744 if storeincludepats is not None:
747 if storeincludepats is not None:
745 narrowspec.validatepatterns(storeincludepats)
748 narrowspec.validatepatterns(storeincludepats)
746 narrow = True
749 narrow = True
747
750
748 if storeexcludepats is not None:
751 if storeexcludepats is not None:
749 narrowspec.validatepatterns(storeexcludepats)
752 narrowspec.validatepatterns(storeexcludepats)
750 narrow = True
753 narrow = True
751
754
752 if narrow:
755 if narrow:
753 # Include everything by default if only exclusion patterns defined.
756 # Include everything by default if only exclusion patterns defined.
754 if storeexcludepats and not storeincludepats:
757 if storeexcludepats and not storeincludepats:
755 storeincludepats = {b'path:.'}
758 storeincludepats = {b'path:.'}
756
759
757 createopts[b'narrowfiles'] = True
760 createopts[b'narrowfiles'] = True
758
761
759 if depth:
762 if depth:
760 createopts[b'shallowfilestore'] = True
763 createopts[b'shallowfilestore'] = True
761
764
762 if srcpeer.capable(b'lfs-serve'):
765 if srcpeer.capable(b'lfs-serve'):
763 # Repository creation honors the config if it disabled the extension, so
766 # Repository creation honors the config if it disabled the extension, so
764 # we can't just announce that lfs will be enabled. This check avoids
767 # we can't just announce that lfs will be enabled. This check avoids
765 # saying that lfs will be enabled, and then saying it's an unknown
768 # saying that lfs will be enabled, and then saying it's an unknown
766 # feature. The lfs creation option is set in either case so that a
769 # feature. The lfs creation option is set in either case so that a
767 # requirement is added. If the extension is explicitly disabled but the
770 # requirement is added. If the extension is explicitly disabled but the
768 # requirement is set, the clone aborts early, before transferring any
771 # requirement is set, the clone aborts early, before transferring any
769 # data.
772 # data.
770 createopts[b'lfs'] = True
773 createopts[b'lfs'] = True
771
774
772 if extensions.disabled_help(b'lfs'):
775 if extensions.disabled_help(b'lfs'):
773 ui.status(
776 ui.status(
774 _(
777 _(
775 b'(remote is using large file support (lfs), but it is '
778 b'(remote is using large file support (lfs), but it is '
776 b'explicitly disabled in the local configuration)\n'
779 b'explicitly disabled in the local configuration)\n'
777 )
780 )
778 )
781 )
779 else:
782 else:
780 ui.status(
783 ui.status(
781 _(
784 _(
782 b'(remote is using large file support (lfs); lfs will '
785 b'(remote is using large file support (lfs); lfs will '
783 b'be enabled for this repository)\n'
786 b'be enabled for this repository)\n'
784 )
787 )
785 )
788 )
786
789
787 shareopts = shareopts or {}
790 shareopts = shareopts or {}
788 sharepool = shareopts.get(b'pool')
791 sharepool = shareopts.get(b'pool')
789 sharenamemode = shareopts.get(b'mode')
792 sharenamemode = shareopts.get(b'mode')
790 if sharepool and islocal(dest):
793 if sharepool and islocal(dest):
791 sharepath = None
794 sharepath = None
792 if sharenamemode == b'identity':
795 if sharenamemode == b'identity':
793 # Resolve the name from the initial changeset in the remote
796 # Resolve the name from the initial changeset in the remote
794 # repository. This returns nullid when the remote is empty. It
797 # repository. This returns nullid when the remote is empty. It
795 # raises RepoLookupError if revision 0 is filtered or otherwise
798 # raises RepoLookupError if revision 0 is filtered or otherwise
796 # not available. If we fail to resolve, sharing is not enabled.
799 # not available. If we fail to resolve, sharing is not enabled.
797 try:
800 try:
798 with srcpeer.commandexecutor() as e:
801 with srcpeer.commandexecutor() as e:
799 rootnode = e.callcommand(
802 rootnode = e.callcommand(
800 b'lookup',
803 b'lookup',
801 {
804 {
802 b'key': b'0',
805 b'key': b'0',
803 },
806 },
804 ).result()
807 ).result()
805
808
806 if rootnode != sha1nodeconstants.nullid:
809 if rootnode != sha1nodeconstants.nullid:
807 sharepath = os.path.join(sharepool, hex(rootnode))
810 sharepath = os.path.join(sharepool, hex(rootnode))
808 else:
811 else:
809 ui.status(
812 ui.status(
810 _(
813 _(
811 b'(not using pooled storage: '
814 b'(not using pooled storage: '
812 b'remote appears to be empty)\n'
815 b'remote appears to be empty)\n'
813 )
816 )
814 )
817 )
815 except error.RepoLookupError:
818 except error.RepoLookupError:
816 ui.status(
819 ui.status(
817 _(
820 _(
818 b'(not using pooled storage: '
821 b'(not using pooled storage: '
819 b'unable to resolve identity of remote)\n'
822 b'unable to resolve identity of remote)\n'
820 )
823 )
821 )
824 )
822 elif sharenamemode == b'remote':
825 elif sharenamemode == b'remote':
823 sharepath = os.path.join(
826 sharepath = os.path.join(
824 sharepool, hex(hashutil.sha1(source).digest())
827 sharepool, hex(hashutil.sha1(source).digest())
825 )
828 )
826 else:
829 else:
827 raise error.Abort(
830 raise error.Abort(
828 _(b'unknown share naming mode: %s') % sharenamemode
831 _(b'unknown share naming mode: %s') % sharenamemode
829 )
832 )
830
833
831 # TODO this is a somewhat arbitrary restriction.
834 # TODO this is a somewhat arbitrary restriction.
832 if narrow:
835 if narrow:
833 ui.status(
836 ui.status(
834 _(b'(pooled storage not supported for narrow clones)\n')
837 _(b'(pooled storage not supported for narrow clones)\n')
835 )
838 )
836 sharepath = None
839 sharepath = None
837
840
838 if sharepath:
841 if sharepath:
839 return clonewithshare(
842 return clonewithshare(
840 ui,
843 ui,
841 peeropts,
844 peeropts,
842 sharepath,
845 sharepath,
843 source,
846 source,
844 srcpeer,
847 srcpeer,
845 dest,
848 dest,
846 pull=pull,
849 pull=pull,
847 rev=revs,
850 rev=revs,
848 update=update,
851 update=update,
849 stream=stream,
852 stream=stream,
850 )
853 )
851
854
852 srcrepo = srcpeer.local()
855 srcrepo = srcpeer.local()
853
856
854 abspath = origsource
857 abspath = origsource
855 if islocal(origsource):
858 if islocal(origsource):
856 abspath = util.abspath(urlutil.urllocalpath(origsource))
859 abspath = util.abspath(urlutil.urllocalpath(origsource))
857
860
858 if islocal(dest):
861 if islocal(dest):
859 if os.path.exists(dest):
862 if os.path.exists(dest):
860 # only clean up directories we create ourselves
863 # only clean up directories we create ourselves
861 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
864 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
862 cleandir = hgdir
865 cleandir = hgdir
863 else:
866 else:
864 cleandir = dest
867 cleandir = dest
865
868
866 copy = False
869 copy = False
867 if (
870 if (
868 srcrepo
871 srcrepo
869 and srcrepo.cancopy()
872 and srcrepo.cancopy()
870 and islocal(dest)
873 and islocal(dest)
871 and not phases.hassecret(srcrepo)
874 and not phases.hassecret(srcrepo)
872 ):
875 ):
873 copy = not pull and not revs
876 copy = not pull and not revs
874
877
875 # TODO this is a somewhat arbitrary restriction.
878 # TODO this is a somewhat arbitrary restriction.
876 if narrow:
879 if narrow:
877 copy = False
880 copy = False
878
881
879 if copy:
882 if copy:
880 try:
883 try:
881 # we use a lock here because if we race with commit, we
884 # we use a lock here because if we race with commit, we
882 # can end up with extra data in the cloned revlogs that's
885 # can end up with extra data in the cloned revlogs that's
883 # not pointed to by changesets, thus causing verify to
886 # not pointed to by changesets, thus causing verify to
884 # fail
887 # fail
885 srclock = srcrepo.lock(wait=False)
888 srclock = srcrepo.lock(wait=False)
886 except error.LockError:
889 except error.LockError:
887 copy = False
890 copy = False
888
891
889 if copy:
892 if copy:
890 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
893 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
891
894
892 destrootpath = urlutil.urllocalpath(dest)
895 destrootpath = urlutil.urllocalpath(dest)
893 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
896 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
894 localrepo.createrepository(
897 localrepo.createrepository(
895 ui,
898 ui,
896 destrootpath,
899 destrootpath,
897 requirements=dest_reqs,
900 requirements=dest_reqs,
898 )
901 )
899 destrepo = localrepo.makelocalrepository(ui, destrootpath)
902 destrepo = localrepo.makelocalrepository(ui, destrootpath)
900
903
901 destwlock = destrepo.wlock()
904 destwlock = destrepo.wlock()
902 destlock = destrepo.lock()
905 destlock = destrepo.lock()
903 from . import streamclone # avoid cycle
906 from . import streamclone # avoid cycle
904
907
905 streamclone.local_copy(srcrepo, destrepo)
908 streamclone.local_copy(srcrepo, destrepo)
906
909
907 # we need to re-init the repo after manually copying the data
910 # we need to re-init the repo after manually copying the data
908 # into it
911 # into it
909 destpeer = peer(srcrepo, peeropts, dest)
912 destpeer = peer(srcrepo, peeropts, dest)
910
913
911 # make the peer aware that is it already locked
914 # make the peer aware that is it already locked
912 #
915 #
913 # important:
916 # important:
914 #
917 #
915 # We still need to release that lock at the end of the function
918 # We still need to release that lock at the end of the function
916 destpeer.local()._lockref = weakref.ref(destlock)
919 destpeer.local()._lockref = weakref.ref(destlock)
917 destpeer.local()._wlockref = weakref.ref(destwlock)
920 destpeer.local()._wlockref = weakref.ref(destwlock)
918 # dirstate also needs to be copied because `_wlockref` has a reference
921 # dirstate also needs to be copied because `_wlockref` has a reference
919 # to it: this dirstate is saved to disk when the wlock is released
922 # to it: this dirstate is saved to disk when the wlock is released
920 destpeer.local().dirstate = destrepo.dirstate
923 destpeer.local().dirstate = destrepo.dirstate
921
924
922 srcrepo.hook(
925 srcrepo.hook(
923 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
926 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
924 )
927 )
925 else:
928 else:
926 try:
929 try:
927 # only pass ui when no srcrepo
930 # only pass ui when no srcrepo
928 destpeer = peer(
931 destpeer = peer(
929 srcrepo or ui,
932 srcrepo or ui,
930 peeropts,
933 peeropts,
931 dest,
934 dest,
932 create=True,
935 create=True,
933 createopts=createopts,
936 createopts=createopts,
934 )
937 )
935 except FileExistsError:
938 except FileExistsError:
936 cleandir = None
939 cleandir = None
937 raise error.Abort(_(b"destination '%s' already exists") % dest)
940 raise error.Abort(_(b"destination '%s' already exists") % dest)
938
941
939 if revs:
942 if revs:
940 if not srcpeer.capable(b'lookup'):
943 if not srcpeer.capable(b'lookup'):
941 raise error.Abort(
944 raise error.Abort(
942 _(
945 _(
943 b"src repository does not support "
946 b"src repository does not support "
944 b"revision lookup and so doesn't "
947 b"revision lookup and so doesn't "
945 b"support clone by revision"
948 b"support clone by revision"
946 )
949 )
947 )
950 )
948
951
949 # TODO this is batchable.
952 # TODO this is batchable.
950 remoterevs = []
953 remoterevs = []
951 for rev in revs:
954 for rev in revs:
952 with srcpeer.commandexecutor() as e:
955 with srcpeer.commandexecutor() as e:
953 remoterevs.append(
956 remoterevs.append(
954 e.callcommand(
957 e.callcommand(
955 b'lookup',
958 b'lookup',
956 {
959 {
957 b'key': rev,
                                b'key': rev,
                            },
                        ).result()
                    )
            revs = remoterevs

            checkout = revs[0]
        else:
            revs = None
        local = destpeer.local()
        if local:
            if narrow:
                with local.wlock(), local.lock():
                    local.setnarrowpats(storeincludepats, storeexcludepats)
                    narrowspec.copytoworkingcopy(local)

            u = urlutil.url(abspath)
            defaulturl = bytes(u)
            local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
            if not stream:
                if pull:
                    stream = False
                else:
                    stream = None
            # internal config: ui.quietbookmarkmove
            overrides = {(b'ui', b'quietbookmarkmove'): True}
            with local.ui.configoverride(overrides, b'clone'):
                exchange.pull(
                    local,
                    srcpeer,
                    heads=revs,
                    streamclonerequested=stream,
                    includepats=storeincludepats,
                    excludepats=storeexcludepats,
                    depth=depth,
                )
        elif srcrepo:
            # TODO lift restriction once exchange.push() accepts narrow
            # push.
            if narrow:
                raise error.Abort(
                    _(
                        b'narrow clone not available for '
                        b'remote destinations'
                    )
                )

            exchange.push(
                srcrepo,
                destpeer,
                revs=revs,
                bookmarks=srcrepo._bookmarks.keys(),
            )
        else:
            raise error.Abort(
                _(b"clone from remote to remote not supported")
            )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                release(destwlock)
            # here is a tiny window where someone could end up writing to the
            # repository before the caches are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affects readers.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer


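# Illustrative sketch (not part of this module's API): the fallback order used
# above when clone() picks a post-clone checkout target, written out as a
# standalone helper. It assumes `destrepo` is a local repository as produced
# by a local clone.
def _example_post_clone_checkout(destrepo):
    """Pick a node to update to: the active bookmark, then the '@' bookmark,
    then the tip of the default branch, then the repository tip."""
    try:
        if destrepo._activebookmark:
            return destrepo.lookup(destrepo._activebookmark)
        return destrepo._bookmarks[b'@']
    except KeyError:
        try:
            return destrepo.branchtip(b'default')
        except error.RepoLookupError:
            return destrepo.lookup(b'tip')

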
def _showstats(repo, stats, quietempty=False):
    if quietempty and stats.isempty():
        return
    repo.ui.status(
        _(
            b"%d files updated, %d files merged, "
            b"%d files removed, %d files unresolved\n"
        )
        % (
            stats.updatedcount,
            stats.mergedcount,
            stats.removedcount,
            stats.unresolvedcount,
        )
    )


def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered; otherwise they are merged.

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=[b'working copy', b'destination'],
        updatecheck=updatecheck,
    )


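# Illustrative sketch (not part of this module's API): the replacements that
# the deprecation warning above points to. It assumes `repo` is a local
# repository and `node` is any changeset identifier accepted by repo[node].
def _example_updaterepo_replacement(repo, node, overwrite):
    if overwrite:
        # discard local changes, like `hg update --clean`
        return mergemod.clean_update(repo[node])
    # regular working-directory update
    return mergemod.update(repo[node])

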
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0


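# Illustrative sketch (not part of this module's API): how a caller might use
# update() and its boolean "had unresolved conflicts" return value. It assumes
# `repo` is a local repository opened elsewhere (e.g. via repository()).
def _example_update_to_tip(repo):
    had_conflicts = update(repo, b'tip')
    if had_conflicts:
        # unresolved files remain; update() already told the user to run
        # 'hg resolve'
        return 1
    return 0

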
# naming conflict in clone()
_update = update


def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = mergemod.clean_update(repo[node])
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False


# naming conflict in updatetotally()
_clean = clean

_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}


def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret


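# Illustrative sketch (not part of this module's API): requesting the
# "no conflict" behaviour explicitly instead of relying on the
# commands.update.check configuration. `ui` and `repo` are assumed to come
# from the usual command context, and `checkout` from destutil or the user.
def _example_update_no_conflict(ui, repo, checkout):
    return updatetotally(
        ui,
        repo,
        checkout,
        brev=None,
        clean=False,
        updatecheck=mergemod.UPDATECHECK_NO_CONFLICT,
    )

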
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0


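# Illustrative sketch (not part of this module's API): merging the working
# copy with another revision and reacting to the conflict indicator. It
# assumes `repo` is a local repository and `node` resolves via repo[node].
def _example_merge_with(repo, node):
    conflicts = merge(repo[node], force=False, labels=None)
    if conflicts:
        # leave the merge state in place so 'hg resolve' can continue it
        return 1
    return 0

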
def abortmerge(ui, repo):
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)


def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes


def incoming(ui, repo, source, opts, subpath=None):
    def subreporecurse():
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )


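# Illustrative sketch (not part of this module's API): the opts keys that
# incoming()/_incoming() above consult, with hedged example values. `ui` and
# `repo` are assumed to come from the usual command context, and b'default'
# to be a configured pull path.
def _example_incoming(ui, repo):
    opts = {
        b'rev': [],  # remote revisions to consider (empty means all heads)
        b'branch': [],  # remote branches to scan
        b'newest_first': False,
        b'no_merges': False,
        b'limit': None,  # read through logcmdutil.getlimit()
        b'bundle': None,  # optional file where the incoming bundle is kept
        b'force': False,
        b'subrepos': False,
    }
    return incoming(ui, repo, b'default', opts)

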
def _outgoing(ui, repo, dests, opts, subpath=None):
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others


def _outgoing_recurse(ui, repo, dests, opts):
    ret = 1
    if opts.get(b'subrepos'):
        ctx = repo[None]
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            ret = min(ret, sub.outgoing(ui, dests, opts))
    return ret


def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        for r in revs:
            yield r
        return

    count = 0
    cl = repo.changelog
    for n in revs:
        if limit is not None and count >= limit:
            break
        parents = [p for p in cl.parents(n) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        count += 1
        yield n


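# Illustrative sketch (not part of this module's API): the opts keys that
# _outgoing_filter() consults. With these example values it would yield at
# most five non-merge changesets, newest first; `nodes` is assumed to be the
# list returned by _outgoing().
def _example_filtered_outgoing(repo, nodes):
    opts = {
        b'limit': 5,
        b'no_merges': True,
        b'newest_first': True,
    }
    return list(_outgoing_filter(repo, nodes, opts))

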
def outgoing(ui, repo, dests, opts, subpath=None):
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

        if opts.get(b'graph'):
            revdag = logcmdutil.graphrevs(repo, o, opts)
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(
                ui, repo, opts, buffered=True
            )
            logcmdutil.displaygraph(
                ui, repo, revdag, displayer, graphmod.asciiedges
            )
        else:
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            for n in _outgoing_filter(repo, o, opts):
                displayer.show(repo[n])
            displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        for oth in others:
            oth.close()


def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret


def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options
    for o in b'ssh', b'remotecmd':
        v = opts.get(o) or src.config(b'ui', o)
        if v:
            dst.setconfig(b"ui", o, v, b'copied')

    # copy bundle-specific options
    r = src.config(b'bundle', b'mainreporoot')
    if r:
        dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    v = src.config(b'web', b'cacerts')
    if v:
        dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')

    return dst


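# Illustrative sketch (not part of this module's API): building a ui suitable
# for a remote peer with an explicit ssh command, mirroring the ssh/remotecmd
# handling above. `repo` is assumed to be a local repository (or a plain ui).
def _example_remote_ui(repo):
    opts = {b'ssh': b'ssh -C', b'remotecmd': b'hg'}
    return remoteui(repo, opts)

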
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]


class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
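

# Illustrative sketch (not part of this module's API): caching a repository
# object between requests, as a long-running server process might, and
# refreshing it only when the files listed in `foi` indicate a change.
def _example_cached_repo(repo):
    cache = cachedlocalrepo(repo)
    # ... later, before serving another request:
    current, fresh = cache.fetch()
    if fresh:
        current.ui.debug(b'repository changed on disk; reloaded\n')
    return current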