##// END OF EJS Templates
peer-or-repo: remove the now unused function...
marmoute -
r50589:c4731eee default
parent child Browse files
Show More
@@ -1,1646 +1,1636 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
62 release = lock.release
62 release = lock.release
63
63
64 # shared features
64 # shared features
65 sharedbookmarks = b'bookmarks'
65 sharedbookmarks = b'bookmarks'
66
66
67
67
def addbranchrevs(lrepo, other, branches, revs):
    """Expand branch names in ``branches`` into concrete revisions.

    Returns a ``(revs, checkout)`` pair: the augmented revision list and
    the revision to check out (or None).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing to expand: pass the caller's revs straight through
        checkout = revs[0] if revs else None
        return (revs or None), checkout
    revs = list(revs) if revs else []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as executor:
        branchmap = executor.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # resolve b'.' to the local dirstate branch, then append that
        # branch's heads; returns True iff the branch was known remotely
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(hex(node) for node in reversed(branchmap[branch]))
        return True

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch and not primary(hashbranch):
        # not a known branch name: treat it as a raw revision
        revs.append(hashbranch)
    return revs, revs[0]
110
110
111
111
112 def _isfile(path):
112 def _isfile(path):
113 try:
113 try:
114 # we use os.stat() directly here instead of os.path.isfile()
114 # we use os.stat() directly here instead of os.path.isfile()
115 # because the latter started returning `False` on invalid path
115 # because the latter started returning `False` on invalid path
116 # exceptions starting in 3.8 and we care about handling
116 # exceptions starting in 3.8 and we care about handling
117 # invalid paths specially here.
117 # invalid paths specially here.
118 st = os.stat(path)
118 st = os.stat(path)
119 except ValueError as e:
119 except ValueError as e:
120 msg = stringutil.forcebytestr(e)
120 msg = stringutil.forcebytestr(e)
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
122 except OSError:
122 except OSError:
123 return False
123 return False
124 else:
124 else:
125 return stat.S_ISREG(st.st_mode)
125 return stat.S_ISREG(st.st_mode)
126
126
127
127
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # bundle files are plain files on disk; anything else is local
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        # a regular file is assumed to be a bundle
        expanded = util.expandpath(urlutil.urllocalpath(path))
        module = bundlerepo if _isfile(expanded) else localrepo
        return module.instance(ui, expanded, *args, **kwargs)
144
144
145
145
# URL scheme -> module (or factory) providing local repository classes
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL scheme -> module providing remote peer classes
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
158
158
159
159
def _peerlookup(path):
    """Return the module/factory handling ``path``'s URL scheme.

    Peer schemes win over repository schemes; unknown schemes fall back
    to ``LocalFactory``.
    """
    scheme = urlutil.url(path).scheme or b'file'
    for mapping in (peer_schemes, repo_schemes):
        if scheme in mapping:
            return mapping[scheme]
    return LocalFactory
168
168
169
169
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # live repository/peer object: delegate (deprecated calling style)
        repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
        return repo.local()
    cls = _peerlookup(repo)
    cls.instance  # make sure we load the module
    if not util.safehasattr(cls, 'islocal'):
        return False
    return cls.islocal(repo)  # pytype: disable=module-attr
180
180
181
181
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
189
189
190
190
# Functions with signature (ui, repo) invoked, in registration order, to
# initialize every newly constructed wire peer.
wirepeersetupfuncs = []
193
193
194
194
195 def _peerorrepo(
196 ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
197 ):
198 """return a repository object for the specified path"""
199 cls = _peerlookup(path)
200 obj = cls.instance(ui, path, create, intents=intents, createopts=createopts)
201 _setup_repo_or_peer(ui, obj, presetupfuncs)
202 return obj
203
204
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run caller-supplied setup functions and extension hooks on ``obj``.

    ``obj`` may be a repository or a peer; non-local objects additionally
    get the registered ``wirepeersetupfuncs`` applied.
    """
    ui = getattr(obj, "ui", ui)  # prefer the object's own ui when present
    for fn in presetupfuncs or []:
        fn(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if not hook:
                continue
            with util.timedcm('reposetup %r', name) as stats:
                hook(ui, obj)
            msg = b' > reposetup for %s took %s\n'
            ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for fn in wirepeersetupfuncs:
            fn(ui, obj)
223
213
224
214
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    cls = repo_schemes.get(scheme)
    if cls is None:
        # only local repositories may be opened here; peer-only schemes
        # must go through peer()
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        cls = LocalFactory
    repo = cls.instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    # hand back the repo with hidden changesets filtered out
    return repo.filtered(b'visible')
251
241
252
242
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    scheme = urlutil.url(path).scheme
    if scheme not in peer_schemes:
        # not a remote scheme: open the local repository and ask it for
        # a peer instead
        repo = repository(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        return repo.peer()
    cls = peer_schemes[scheme]
    new_peer = cls.instance(
        rui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(rui, new_peer)
    return new_peer
278
268
279
269
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if path:
        return os.path.basename(os.path.normpath(path))
    return b''
300
290
301
291
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None  # not a share at all

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo  # cached by a previous call

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo  # cache for next time
    return srcrepo
319
309
320
310
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        # repo/peer object: only a local one can be shared
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if dest:
        dest = urlutil.get_clone_path(ui, dest)[1]
    else:
        dest = defaultdest(source)

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen so the freshly written requirements/config take effect
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
372
362
373
363
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        return  # nothing to copy over

    # preserve whatever the local repo already had configured
    currentconfig = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        fp.write(b"# Config copied from shared source\n")
        fp.write(srcvfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(currentconfig)
394
384
395
385
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # retire the pointer to the share source
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    wctx = newrepo[b'.']
    for sub in sorted(wctx.substate):
        wctx.sub(sub).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
442
432
443
433
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # seed the share's default path from the source's
        hgrc = util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        destrepo.vfs.write(b'hgrc', hgrc)
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
460
450
461
451
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the explicit checkout first, then fall back to default, then tip
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
482
472
483
473
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # publishing repos do not need their phase data copied
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if not srcvfs.exists(f):
                    continue
                if f.endswith(b'data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, b"lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(
                    srcvfs.join(f), dstvfs.join(f), hardlink, progress
                )
                num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
522
512
523
513
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for symbol in rev:
            with srcpeer.commandexecutor() as executor:
                remoterevs.append(
                    executor.callcommand(b'lookup', {b'key': symbol}).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
629
619
630
620
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """Copy the cache file ``fname`` from ``srcrepo`` into ``dstcachedir``.

    Does nothing when the source cache file does not exist. The destination
    directory is created on demand (a single level, via ``os.mkdir``).
    """
    source = srcrepo.cachevfs.join(fname)
    if not os.path.exists(source):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(source, os.path.join(dstcachedir, fname))
640
630
641
631
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = destwlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the
            # extension, so we can't just announce that lfs will be enabled.
            # This check avoids saying that lfs will be enabled, and then
            # saying it's an unknown feature. The lfs creation option is set
            # in either case so that a requirement is added. If the extension
            # is explicitly disabled but the requirement is set, the clone
            # aborts early, before transferring any data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)

            destwlock = destrepo.wlock()
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)

            # make the peer aware that is it already locked
            #
            # important:
            #
            # We still need to release that lock at the end of the function
            destpeer.local()._lockref = weakref.ref(destlock)
            destpeer.local()._wlockref = weakref.ref(destwlock)
            # dirstate also needs to be copied because `_wlockref` has a
            # reference to it: this dirstate is saved to disk when the wlock is
            # released
            destpeer.local().dirstate = destrepo.dirstate

            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except FileExistsError:
                cleandir = None
                raise error.Abort(_(b"destination '%s' already exists") % dest)

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        heads=revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # FIX: translate the template first, then
                            # interpolate. The previous form formatted before
                            # calling _(), which defeated translation lookup
                            # (and is inconsistent with the branch below).
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = _(
                                b"updating to bookmark %s on branch %s\n"
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                # FIX: this previously released ``destlock`` a second time,
                # leaving the working-copy lock held until the finally block.
                release(destwlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1105
1095
1106
1096
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge ``stats`` on the repo's ui.

    When ``quietempty`` is true and the stats are all zero, print nothing.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    template = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(template % counts)
1122
1112
1123
1113
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to ``node`` (deprecated).

    Changes are clobbered when ``overwrite`` is set, merged otherwise.
    Returns the stats object described by pydoc mercurial.merge.applyupdates.
    """
    # Deprecated entry point, kept only for backward compatibility.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    kwargs = {
        'branchmerge': False,
        'force': overwrite,
        'labels': [b'working copy', b'destination'],
        'updatecheck': updatecheck,
    }
    return mergemod._update(repo, node, **kwargs)
1142
1132
1143
1133
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node``.

    Returns True when unresolved file merges remain after the update.
    """
    mergestats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, mergestats, quietempty)
    unresolved = mergestats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1151
1141
1152
1142
# `update` is shadowed by a local name inside clone(); keep a module-level
# alias so clone() can still reach the function above (naming conflict in
# clone()).
_update = update
1155
1145
1156
1146
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    update_stats = mergemod.clean_update(repo[node])
    # a forced (clean) update can never leave unresolved conflicts behind
    assert update_stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, update_stats, quietempty)
    # never reports conflicts, by construction
    return False
1164
1154
1165
1155
# naming conflict in updatetotally(): the bare name `clean` is a boolean
# argument there, so keep the function reachable under this alias
_clean = clean

# accepted values for the `updatecheck` argument of updatetotally() and the
# `commands.update.check` config option (see updatetotally's docstring for
# the meaning of each constant)
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1175
1165
1176
1166
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # an explicit (non-None) caller-supplied value must be valid; only the
    # config-derived value above is silently normalized
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no destination given: let destutil pick one; it may also tell
            # us to move the active bookmark and which name to (re)activate
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out *before* touching anything, then fall back to an
                # unchecked update (the tree is known clean at this point)
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        # bookmark bookkeeping — only after a conflict-free update
        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination names an existing bookmark: make it active
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is some other name: drop any active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            # we chose the destination ourselves; mention other candidates
            destutil.statusotherdests(ui, repo)

    return ret
1255
1245
1256
1246
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        # clean merge: nudge the user towards committing the result
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return has_conflicts
1278
1268
1279
1269
def abortmerge(ui, repo):
    """abandon an in-progress merge and reset the working directory"""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # stored conflicts exist: go back to the merge's local side
        node = ms.localctx.hex()
    else:
        # no conflicts, so no mergestate was stored: use the working parent
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a forced (clean) update cannot produce unresolved files
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1293
1283
1294
1284
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed; otherwise the
    result of subreporecurse().
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        # an absolute subpath replaces the source outright; a relative one
        # is joined onto the source URL's path component
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    # cleanupfn may be rebound below by getremotechanges(), which can wrap
    # the peer in a bundle repo with its own teardown
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        # always release the peer / temporary bundle, even on error
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1356
1346
1357
1347
def incoming(ui, repo, source, opts, subpath=None):
    """show changesets that would be pulled from ``source``"""

    def subreporecurse():
        # with --subrepos, recurse; 0 means at least one subrepo had
        # incoming changes
        result = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for spath in sorted(wctx.substate):
                result = min(result, wctx.sub(spath).incoming(ui, source, opts))
        return result

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skipmerges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1387
1377
1388
1378
def _outgoing(ui, repo, dests, opts, subpath=None):
    """compute nodes missing from every destination in ``dests``

    Returns a pair ``(outgoing_revs, others)`` where ``outgoing_revs`` is the
    union of missing nodes sorted by local revision number, and ``others`` is
    the list of peers that were opened — the caller owns them and must close
    them (see outgoing()).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # NOTE(review): `subpath` is rebound to a url object here inside
            # the loop, so on a second destination urlutil.url() receives a
            # url object rather than bytes — verify this is handled upstream
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # ownership of the open peer transfers to the caller via `others`
            others.append(other)
        except:  # re-raises
            # peer not yet handed over: close it before propagating
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1431
1421
1432
1422
def _outgoing_recurse(ui, repo, dests, opts):
    """run outgoing on each subrepo (with --subrepos); 0 if any had changes"""
    result = 1
    if not opts.get(b'subrepos'):
        return result
    wctx = repo[None]
    for spath in sorted(wctx.substate):
        result = min(result, wctx.sub(spath).outgoing(ui, dests, opts))
    return result
1441
1431
1442
1432
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    skipmerges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    # fast path: no limit and no merge filtering — stream revs unchanged
    if limit is None and not skipmerges:
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if skipmerges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1464
1454
1465
1455
def outgoing(ui, repo, dests, opts, subpath=None):
    """show changesets that would be pushed to ``dests``

    Returns 0 when outgoing changes were found here or in a subrepo,
    1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    # _outgoing hands us open peers in `others`; we must close them (finally)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

        if opts.get(b'graph'):
            revdag = logcmdutil.graphrevs(repo, o, opts)
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(
                ui, repo, opts, buffered=True
            )
            logcmdutil.displaygraph(
                ui, repo, revdag, displayer, graphmod.asciiedges
            )
        else:
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            for n in _outgoing_filter(repo, o, opts):
                displayer.show(repo[n])
            displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        # 0 if changes were found locally or in any subrepo
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret
    finally:
        for oth in others:
            oth.close()
1497
1487
1498
1488
def verify(repo, level=None):
    """verify the consistency of a repository

    Runs the core verification (see the verify module) and then checks that
    every ``.hgsubstate`` reference points at a verifiable subrepo.  Returns
    the (nonzero-on-problem) result of the core verify, possibly overridden
    by subrepo verification results.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # allowcreate=False: verification must not create a
                        # missing subrepo on disk
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        # one bad subrepo shouldn't stop checking the others
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading .hgsubstate itself failed: report and carry on
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1531
1521
1532
1522
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # a repository: start from its base ui so repo-specific
        # configuration is dropped, but read option values from repo.ui
        remote = src.baseui.copy()
        src = src.ui
    else:
        # a plain (global) ui object: keep all of its options
        remote = src.copy()

    # ssh-specific options; command-line values win over configuration
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            remote.setconfig(b"ui", opt, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        remote.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # selected local settings that must also apply to the remote ui
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, val in src.configitems(section):
            remote.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        remote.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return remote
1561
1551
1562
1552
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (name of a repo attribute holding a directory path, filename
# relative to that directory) — see cachedlocalrepo._repostate for usage.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1572
1562
1573
1563
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) per file of interest, plus newest mtime
        self._state, self.mtime = self._repostate()
        # remembered so a refreshed instance gets the same view filter
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # stale: open a fresh repository and re-apply the original filter
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ((mtime, size) per file of interest, newest mtime seen)."""
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file missing (e.g. never created): fall back to the
                # containing directory so we still notice its (dis)appearance
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the freshness snapshot so the copy doesn't spuriously refresh
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now