##// END OF EJS Templates
peer-or-repo: build a peer directly in the `peer` function...
marmoute -
r50587:c0acf544 default
parent child Browse files
Show More
@@ -1,1624 +1,1643 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
62 release = lock.release
62 release = lock.release
63
63
64 # shared features
64 # shared features
65 sharedbookmarks = b'bookmarks'
65 sharedbookmarks = b'bookmarks'
66
66
67
67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names in ``branches`` to revisions via ``other``.

    ``branches`` is a ``(hashbranch, branches)`` pair; returns a
    ``(revs, checkout)`` pair.
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch related was requested: pass revs straight through
        return revs or None, revs[0] if revs else None

    revs = list(revs) if revs else []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # b'.' means the branch currently checked out in the local repo
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(hex(r) for r in reversed(branchmap[branch]))
        return True

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch and not primary(hashbranch):
        # not a branch name: treat it as a raw hash/revision
        revs.append(hashbranch)
    return revs, revs[0]
110
110
111
111
def _isfile(path):
    """Return True when ``path`` is a regular file.

    Aborts on invalid paths instead of silently returning False.
    """
    # os.stat() is used directly instead of os.path.isfile() because the
    # latter started returning `False` on invalid path exceptions in
    # Python 3.8, and invalid paths need special handling here.
    try:
        st = os.stat(path)
    except ValueError as e:
        msg = stringutil.forcebytestr(e)
        raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
    except OSError:
        return False
    return stat.S_ISREG(st.st_mode)
126
126
127
127
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a path that points at a plain file is a bundle, not a local repo
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        expanded = util.expandpath(urlutil.urllocalpath(path))
        factory = bundlerepo if _isfile(expanded) else localrepo
        return factory.instance(ui, expanded, *args, **kwargs)
144
144
145
145
# URL scheme -> module/factory able to instantiate a *repository* object
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL scheme -> module able to instantiate a *peer* object
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
158
158
159
159
def _peerlookup(path):
    """Return the module/factory handling ``path``'s URL scheme.

    Peer schemes win over repo schemes; anything unknown falls back to
    ``LocalFactory``.
    """
    scheme = urlutil.url(path).scheme or b'file'
    handler = peer_schemes.get(scheme)
    if handler is None:
        handler = repo_schemes.get(scheme, LocalFactory)
    return handler
168
168
169
169
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # a repo/peer object knows the answer itself
        repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
        return repo.local()
    cls = _peerlookup(repo)
    cls.instance  # make sure we load the module
    if util.safehasattr(cls, 'islocal'):
        return cls.islocal(repo)  # pytype: disable=module-attr
    return False
180
180
181
181
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
189
189
190
190
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
193
193
194
194
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    factory = _peerlookup(path)
    obj = factory.instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    _setup_repo_or_peer(ui, obj, presetupfuncs)
    return obj
203
203
204
204
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run extension setup hooks on a freshly created repo or peer."""
    # prefer the object's own ui when it has one
    ui = getattr(obj, "ui", ui)
    for fn in presetupfuncs or []:
        fn(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if not hook:
                continue
            with util.timedcm('reposetup %r', name) as stats:
                hook(ui, obj)
            msg = b' > reposetup for %s took %s\n'
            ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get an extra round of setup functions
        for fn in wirepeersetupfuncs:
            fn(ui, obj)
223
223
224
224
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = obj.local()
    if not repo:
        # callers of repository() expect a local repo, never a remote peer
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or obj.url())
        )
    return repo.filtered(b'visible')
248
248
249
249
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    scheme = urlutil.url(path).scheme
    if scheme in peer_schemes:
        # a native peer implementation exists for this scheme: build the
        # peer directly instead of going through a repository object
        peer_cls = peer_schemes[scheme]
        new_peer = peer_cls.instance(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        _setup_repo_or_peer(rui, new_peer)
    else:
        # this is a repository; wrap it in a peer
        repo = repository(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        new_peer = repo.peer()
    return new_peer
256
275
257
276
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    return os.path.basename(os.path.normpath(path)) if path else b''
278
297
279
298
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    # return the cached source repo when one was already resolved
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    sourcepath = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(sourcepath)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo  # cache for subsequent calls
    return srcrepo
297
316
298
317
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if dest:
        dest = urlutil.get_clone_path(ui, dest)[1]
    else:
        dest = defaultdest(source)

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = {sharedbookmarks} if bookmarks else set()

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen so the new repo picks up the post-share configuration
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
350
369
351
370
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # source has no config: nothing to prepend
        return

    currentconfig = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
372
391
373
392
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around as a backup rather than deleting it
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make accidental reuse of the stale ``repo`` object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
420
439
421
440
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        hgrc = util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        destrepo.vfs.write(b'hgrc', hgrc)
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
438
457
439
458
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # pick the first candidate revision that actually resolves
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
460
479
461
480
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            total = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for fname in srcrepo.store.copylist():
                # publishing repos do not need phase data copied over
                if srcpublishing and fname.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(fname)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if not srcvfs.exists(fname):
                    continue
                if fname.endswith(b'data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, b"lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, ncopied = util.copyfiles(
                    srcvfs.join(fname), dstvfs.join(fname), hardlink, progress
                )
                total += ncopied
            if hardlink:
                ui.debug(b"linked %d files\n" % total)
            else:
                ui.debug(b"copied %d files\n" % total)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
500
519
501
520
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
607
626
608
627
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """Copy the cache file ``fname`` from ``srcrepo`` into ``dstcachedir``.

    A no-op when the source cache file does not exist; the destination
    directory is created on demand.
    """
    source = srcrepo.cachevfs.join(fname)
    if not os.path.exists(source):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(source, os.path.join(dstcachedir, fname))
618
637
619
638
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = destwlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)

            destwlock = destrepo.wlock()
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)

            # make the peer aware that is it already locked
            #
            # important:
            #
            # We still need to release that lock at the end of the function
            destpeer.local()._lockref = weakref.ref(destlock)
            destpeer.local()._wlockref = weakref.ref(destwlock)
            # dirstate also needs to be copied because `_wlockref` has a reference
            # to it: this dirstate is saved to disk when the wlock is released
            destpeer.local().dirstate = destrepo.dirstate

            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except FileExistsError:
                cleandir = None
                raise error.Abort(_(b"destination '%s' already exists") % dest)

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        heads=revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # translate the message first, then apply the
                            # formatting, so the string matches the entry in
                            # the translation catalog (and stays consistent
                            # with the branch below)
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                # release the wlock itself (this previously re-released
                # `destlock`, leaving the wlock held until the `finally`
                # block below, i.e. across the cache-warming step)
                release(destwlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1083
1102
1084
1103
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge ``stats`` on repo.ui.

    When ``quietempty`` is true and the stats report no changes at all,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1100
1119
1101
1120
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # deprecated entry point: warn, then delegate to merge._update()
    msg = b'prefer merge.update() or merge.clean_update() over hg.updaterepo()'
    repo.ui.deprecwarn(msg, b'5.7')
    kwargs = {
        'branchmerge': False,
        'force': overwrite,
        'labels': [b'working copy', b'destination'],
        'updatecheck': updatecheck,
    }
    return mergemod._update(repo, node, **kwargs)
1120
1139
1121
1140
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node``.

    Prints merge statistics (suppressed for an empty result when
    ``quietempty`` is true) and a hint when file merges were left
    unresolved.  Returns True when unresolved files remain.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1129
1148
1130
1149
# naming conflict in clone(): its `update` parameter shadows the
# module-level update() function, which clone() reaches via this alias
_update = update
1133
1152
1134
1153
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, clobbering changes.

    Always returns False, i.e. "no unresolved files": a clean (forced)
    update cannot leave merge conflicts behind.
    """
    ctx = repo[node]
    stats = mergemod.clean_update(ctx)
    # a clean update must never produce unresolved file merges
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False
1142
1161
1143
1162
1144 # naming conflict in updatetotally()
1163 # naming conflict in updatetotally()
1145 _clean = clean
1164 _clean = clean
1146
1165
1147 _VALID_UPDATECHECKS = {
1166 _VALID_UPDATECHECKS = {
1148 mergemod.UPDATECHECK_ABORT,
1167 mergemod.UPDATECHECK_ABORT,
1149 mergemod.UPDATECHECK_NONE,
1168 mergemod.UPDATECHECK_NONE,
1150 mergemod.UPDATECHECK_LINEAR,
1169 mergemod.UPDATECHECK_LINEAR,
1151 mergemod.UPDATECHECK_NO_CONFLICT,
1170 mergemod.UPDATECHECK_NO_CONFLICT,
1152 }
1171 }
1153
1172
1154
1173
1155 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1174 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1156 """Update the working directory with extra care for non-file components
1175 """Update the working directory with extra care for non-file components
1157
1176
1158 This takes care of non-file components below:
1177 This takes care of non-file components below:
1159
1178
1160 :bookmark: might be advanced or (in)activated
1179 :bookmark: might be advanced or (in)activated
1161
1180
1162 This takes arguments below:
1181 This takes arguments below:
1163
1182
1164 :checkout: to which revision the working directory is updated
1183 :checkout: to which revision the working directory is updated
1165 :brev: a name, which might be a bookmark to be activated after updating
1184 :brev: a name, which might be a bookmark to be activated after updating
1166 :clean: whether changes in the working directory can be discarded
1185 :clean: whether changes in the working directory can be discarded
1167 :updatecheck: how to deal with a dirty working directory
1186 :updatecheck: how to deal with a dirty working directory
1168
1187
1169 Valid values for updatecheck are the UPDATECHECK_* constants
1188 Valid values for updatecheck are the UPDATECHECK_* constants
1170 defined in the merge module. Passing `None` will result in using the
1189 defined in the merge module. Passing `None` will result in using the
1171 configured default.
1190 configured default.
1172
1191
1173 * ABORT: abort if the working directory is dirty
1192 * ABORT: abort if the working directory is dirty
1174 * NONE: don't check (merge working directory changes into destination)
1193 * NONE: don't check (merge working directory changes into destination)
1175 * LINEAR: check that update is linear before merging working directory
1194 * LINEAR: check that update is linear before merging working directory
1176 changes into destination
1195 changes into destination
1177 * NO_CONFLICT: check that the update does not result in file merges
1196 * NO_CONFLICT: check that the update does not result in file merges
1178
1197
1179 This returns whether conflict is detected at updating or not.
1198 This returns whether conflict is detected at updating or not.
1180 """
1199 """
1181 if updatecheck is None:
1200 if updatecheck is None:
1182 updatecheck = ui.config(b'commands', b'update.check')
1201 updatecheck = ui.config(b'commands', b'update.check')
1183 if updatecheck not in _VALID_UPDATECHECKS:
1202 if updatecheck not in _VALID_UPDATECHECKS:
1184 # If not configured, or invalid value configured
1203 # If not configured, or invalid value configured
1185 updatecheck = mergemod.UPDATECHECK_LINEAR
1204 updatecheck = mergemod.UPDATECHECK_LINEAR
1186 if updatecheck not in _VALID_UPDATECHECKS:
1205 if updatecheck not in _VALID_UPDATECHECKS:
1187 raise ValueError(
1206 raise ValueError(
1188 r'Invalid updatecheck value %r (can accept %r)'
1207 r'Invalid updatecheck value %r (can accept %r)'
1189 % (updatecheck, _VALID_UPDATECHECKS)
1208 % (updatecheck, _VALID_UPDATECHECKS)
1190 )
1209 )
1191 with repo.wlock():
1210 with repo.wlock():
1192 movemarkfrom = None
1211 movemarkfrom = None
1193 warndest = False
1212 warndest = False
1194 if checkout is None:
1213 if checkout is None:
1195 updata = destutil.destupdate(repo, clean=clean)
1214 updata = destutil.destupdate(repo, clean=clean)
1196 checkout, movemarkfrom, brev = updata
1215 checkout, movemarkfrom, brev = updata
1197 warndest = True
1216 warndest = True
1198
1217
1199 if clean:
1218 if clean:
1200 ret = _clean(repo, checkout)
1219 ret = _clean(repo, checkout)
1201 else:
1220 else:
1202 if updatecheck == mergemod.UPDATECHECK_ABORT:
1221 if updatecheck == mergemod.UPDATECHECK_ABORT:
1203 cmdutil.bailifchanged(repo, merge=False)
1222 cmdutil.bailifchanged(repo, merge=False)
1204 updatecheck = mergemod.UPDATECHECK_NONE
1223 updatecheck = mergemod.UPDATECHECK_NONE
1205 ret = _update(repo, checkout, updatecheck=updatecheck)
1224 ret = _update(repo, checkout, updatecheck=updatecheck)
1206
1225
1207 if not ret and movemarkfrom:
1226 if not ret and movemarkfrom:
1208 if movemarkfrom == repo[b'.'].node():
1227 if movemarkfrom == repo[b'.'].node():
1209 pass # no-op update
1228 pass # no-op update
1210 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1229 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1211 b = ui.label(repo._activebookmark, b'bookmarks.active')
1230 b = ui.label(repo._activebookmark, b'bookmarks.active')
1212 ui.status(_(b"updating bookmark %s\n") % b)
1231 ui.status(_(b"updating bookmark %s\n") % b)
1213 else:
1232 else:
1214 # this can happen with a non-linear update
1233 # this can happen with a non-linear update
1215 b = ui.label(repo._activebookmark, b'bookmarks')
1234 b = ui.label(repo._activebookmark, b'bookmarks')
1216 ui.status(_(b"(leaving bookmark %s)\n") % b)
1235 ui.status(_(b"(leaving bookmark %s)\n") % b)
1217 bookmarks.deactivate(repo)
1236 bookmarks.deactivate(repo)
1218 elif brev in repo._bookmarks:
1237 elif brev in repo._bookmarks:
1219 if brev != repo._activebookmark:
1238 if brev != repo._activebookmark:
1220 b = ui.label(brev, b'bookmarks.active')
1239 b = ui.label(brev, b'bookmarks.active')
1221 ui.status(_(b"(activating bookmark %s)\n") % b)
1240 ui.status(_(b"(activating bookmark %s)\n") % b)
1222 bookmarks.activate(repo, brev)
1241 bookmarks.activate(repo, brev)
1223 elif brev:
1242 elif brev:
1224 if repo._activebookmark:
1243 if repo._activebookmark:
1225 b = ui.label(repo._activebookmark, b'bookmarks')
1244 b = ui.label(repo._activebookmark, b'bookmarks')
1226 ui.status(_(b"(leaving bookmark %s)\n") % b)
1245 ui.status(_(b"(leaving bookmark %s)\n") % b)
1227 bookmarks.deactivate(repo)
1246 bookmarks.deactivate(repo)
1228
1247
1229 if warndest:
1248 if warndest:
1230 destutil.statusotherdests(ui, repo)
1249 destutil.statusotherdests(ui, repo)
1231
1250
1232 return ret
1251 return ret
1233
1252
1234
1253
1235 def merge(
1254 def merge(
1236 ctx,
1255 ctx,
1237 force=False,
1256 force=False,
1238 remind=True,
1257 remind=True,
1239 labels=None,
1258 labels=None,
1240 ):
1259 ):
1241 """Branch merge with node, resolving changes. Return true if any
1260 """Branch merge with node, resolving changes. Return true if any
1242 unresolved conflicts."""
1261 unresolved conflicts."""
1243 repo = ctx.repo()
1262 repo = ctx.repo()
1244 stats = mergemod.merge(ctx, force=force, labels=labels)
1263 stats = mergemod.merge(ctx, force=force, labels=labels)
1245 _showstats(repo, stats)
1264 _showstats(repo, stats)
1246 if stats.unresolvedcount:
1265 if stats.unresolvedcount:
1247 repo.ui.status(
1266 repo.ui.status(
1248 _(
1267 _(
1249 b"use 'hg resolve' to retry unresolved file merges "
1268 b"use 'hg resolve' to retry unresolved file merges "
1250 b"or 'hg merge --abort' to abandon\n"
1269 b"or 'hg merge --abort' to abandon\n"
1251 )
1270 )
1252 )
1271 )
1253 elif remind:
1272 elif remind:
1254 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1273 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1255 return stats.unresolvedcount > 0
1274 return stats.unresolvedcount > 0
1256
1275
1257
1276
1258 def abortmerge(ui, repo):
1277 def abortmerge(ui, repo):
1259 ms = mergestatemod.mergestate.read(repo)
1278 ms = mergestatemod.mergestate.read(repo)
1260 if ms.active():
1279 if ms.active():
1261 # there were conflicts
1280 # there were conflicts
1262 node = ms.localctx.hex()
1281 node = ms.localctx.hex()
1263 else:
1282 else:
1264 # there were no conficts, mergestate was not stored
1283 # there were no conficts, mergestate was not stored
1265 node = repo[b'.'].hex()
1284 node = repo[b'.'].hex()
1266
1285
1267 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1286 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1268 stats = mergemod.clean_update(repo[node])
1287 stats = mergemod.clean_update(repo[node])
1269 assert stats.unresolvedcount == 0
1288 assert stats.unresolvedcount == 0
1270 _showstats(repo, stats)
1289 _showstats(repo, stats)
1271
1290
1272
1291
1273 def _incoming(
1292 def _incoming(
1274 displaychlist,
1293 displaychlist,
1275 subreporecurse,
1294 subreporecurse,
1276 ui,
1295 ui,
1277 repo,
1296 repo,
1278 source,
1297 source,
1279 opts,
1298 opts,
1280 buffered=False,
1299 buffered=False,
1281 subpath=None,
1300 subpath=None,
1282 ):
1301 ):
1283 """
1302 """
1284 Helper for incoming / gincoming.
1303 Helper for incoming / gincoming.
1285 displaychlist gets called with
1304 displaychlist gets called with
1286 (remoterepo, incomingchangesetlist, displayer) parameters,
1305 (remoterepo, incomingchangesetlist, displayer) parameters,
1287 and is supposed to contain only code that can't be unified.
1306 and is supposed to contain only code that can't be unified.
1288 """
1307 """
1289 srcs = urlutil.get_pull_paths(repo, ui, [source])
1308 srcs = urlutil.get_pull_paths(repo, ui, [source])
1290 srcs = list(srcs)
1309 srcs = list(srcs)
1291 if len(srcs) != 1:
1310 if len(srcs) != 1:
1292 msg = _(b'for now, incoming supports only a single source, %d provided')
1311 msg = _(b'for now, incoming supports only a single source, %d provided')
1293 msg %= len(srcs)
1312 msg %= len(srcs)
1294 raise error.Abort(msg)
1313 raise error.Abort(msg)
1295 path = srcs[0]
1314 path = srcs[0]
1296 source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
1315 source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
1297 if subpath is not None:
1316 if subpath is not None:
1298 subpath = urlutil.url(subpath)
1317 subpath = urlutil.url(subpath)
1299 if subpath.isabs():
1318 if subpath.isabs():
1300 source = bytes(subpath)
1319 source = bytes(subpath)
1301 else:
1320 else:
1302 p = urlutil.url(source)
1321 p = urlutil.url(source)
1303 if p.islocal():
1322 if p.islocal():
1304 normpath = os.path.normpath
1323 normpath = os.path.normpath
1305 else:
1324 else:
1306 normpath = posixpath.normpath
1325 normpath = posixpath.normpath
1307 p.path = normpath(b'%s/%s' % (p.path, subpath))
1326 p.path = normpath(b'%s/%s' % (p.path, subpath))
1308 source = bytes(p)
1327 source = bytes(p)
1309 other = peer(repo, opts, source)
1328 other = peer(repo, opts, source)
1310 cleanupfn = other.close
1329 cleanupfn = other.close
1311 try:
1330 try:
1312 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
1331 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
1313 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1332 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1314
1333
1315 if revs:
1334 if revs:
1316 revs = [other.lookup(rev) for rev in revs]
1335 revs = [other.lookup(rev) for rev in revs]
1317 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1336 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1318 ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
1337 ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
1319 )
1338 )
1320
1339
1321 if not chlist:
1340 if not chlist:
1322 ui.status(_(b"no changes found\n"))
1341 ui.status(_(b"no changes found\n"))
1323 return subreporecurse()
1342 return subreporecurse()
1324 ui.pager(b'incoming')
1343 ui.pager(b'incoming')
1325 displayer = logcmdutil.changesetdisplayer(
1344 displayer = logcmdutil.changesetdisplayer(
1326 ui, other, opts, buffered=buffered
1345 ui, other, opts, buffered=buffered
1327 )
1346 )
1328 displaychlist(other, chlist, displayer)
1347 displaychlist(other, chlist, displayer)
1329 displayer.close()
1348 displayer.close()
1330 finally:
1349 finally:
1331 cleanupfn()
1350 cleanupfn()
1332 subreporecurse()
1351 subreporecurse()
1333 return 0 # exit code is zero since we found incoming changes
1352 return 0 # exit code is zero since we found incoming changes
1334
1353
1335
1354
1336 def incoming(ui, repo, source, opts, subpath=None):
1355 def incoming(ui, repo, source, opts, subpath=None):
1337 def subreporecurse():
1356 def subreporecurse():
1338 ret = 1
1357 ret = 1
1339 if opts.get(b'subrepos'):
1358 if opts.get(b'subrepos'):
1340 ctx = repo[None]
1359 ctx = repo[None]
1341 for subpath in sorted(ctx.substate):
1360 for subpath in sorted(ctx.substate):
1342 sub = ctx.sub(subpath)
1361 sub = ctx.sub(subpath)
1343 ret = min(ret, sub.incoming(ui, source, opts))
1362 ret = min(ret, sub.incoming(ui, source, opts))
1344 return ret
1363 return ret
1345
1364
1346 def display(other, chlist, displayer):
1365 def display(other, chlist, displayer):
1347 limit = logcmdutil.getlimit(opts)
1366 limit = logcmdutil.getlimit(opts)
1348 if opts.get(b'newest_first'):
1367 if opts.get(b'newest_first'):
1349 chlist.reverse()
1368 chlist.reverse()
1350 count = 0
1369 count = 0
1351 for n in chlist:
1370 for n in chlist:
1352 if limit is not None and count >= limit:
1371 if limit is not None and count >= limit:
1353 break
1372 break
1354 parents = [
1373 parents = [
1355 p for p in other.changelog.parents(n) if p != repo.nullid
1374 p for p in other.changelog.parents(n) if p != repo.nullid
1356 ]
1375 ]
1357 if opts.get(b'no_merges') and len(parents) == 2:
1376 if opts.get(b'no_merges') and len(parents) == 2:
1358 continue
1377 continue
1359 count += 1
1378 count += 1
1360 displayer.show(other[n])
1379 displayer.show(other[n])
1361
1380
1362 return _incoming(
1381 return _incoming(
1363 display, subreporecurse, ui, repo, source, opts, subpath=subpath
1382 display, subreporecurse, ui, repo, source, opts, subpath=subpath
1364 )
1383 )
1365
1384
1366
1385
1367 def _outgoing(ui, repo, dests, opts, subpath=None):
1386 def _outgoing(ui, repo, dests, opts, subpath=None):
1368 out = set()
1387 out = set()
1369 others = []
1388 others = []
1370 for path in urlutil.get_push_paths(repo, ui, dests):
1389 for path in urlutil.get_push_paths(repo, ui, dests):
1371 dest = path.pushloc or path.loc
1390 dest = path.pushloc or path.loc
1372 if subpath is not None:
1391 if subpath is not None:
1373 subpath = urlutil.url(subpath)
1392 subpath = urlutil.url(subpath)
1374 if subpath.isabs():
1393 if subpath.isabs():
1375 dest = bytes(subpath)
1394 dest = bytes(subpath)
1376 else:
1395 else:
1377 p = urlutil.url(dest)
1396 p = urlutil.url(dest)
1378 if p.islocal():
1397 if p.islocal():
1379 normpath = os.path.normpath
1398 normpath = os.path.normpath
1380 else:
1399 else:
1381 normpath = posixpath.normpath
1400 normpath = posixpath.normpath
1382 p.path = normpath(b'%s/%s' % (p.path, subpath))
1401 p.path = normpath(b'%s/%s' % (p.path, subpath))
1383 dest = bytes(p)
1402 dest = bytes(p)
1384 branches = path.branch, opts.get(b'branch') or []
1403 branches = path.branch, opts.get(b'branch') or []
1385
1404
1386 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1405 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
1387 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1406 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1388 if revs:
1407 if revs:
1389 revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]
1408 revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]
1390
1409
1391 other = peer(repo, opts, dest)
1410 other = peer(repo, opts, dest)
1392 try:
1411 try:
1393 outgoing = discovery.findcommonoutgoing(
1412 outgoing = discovery.findcommonoutgoing(
1394 repo, other, revs, force=opts.get(b'force')
1413 repo, other, revs, force=opts.get(b'force')
1395 )
1414 )
1396 o = outgoing.missing
1415 o = outgoing.missing
1397 out.update(o)
1416 out.update(o)
1398 if not o:
1417 if not o:
1399 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1418 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1400 others.append(other)
1419 others.append(other)
1401 except: # re-raises
1420 except: # re-raises
1402 other.close()
1421 other.close()
1403 raise
1422 raise
1404 # make sure this is ordered by revision number
1423 # make sure this is ordered by revision number
1405 outgoing_revs = list(out)
1424 outgoing_revs = list(out)
1406 cl = repo.changelog
1425 cl = repo.changelog
1407 outgoing_revs.sort(key=cl.rev)
1426 outgoing_revs.sort(key=cl.rev)
1408 return outgoing_revs, others
1427 return outgoing_revs, others
1409
1428
1410
1429
1411 def _outgoing_recurse(ui, repo, dests, opts):
1430 def _outgoing_recurse(ui, repo, dests, opts):
1412 ret = 1
1431 ret = 1
1413 if opts.get(b'subrepos'):
1432 if opts.get(b'subrepos'):
1414 ctx = repo[None]
1433 ctx = repo[None]
1415 for subpath in sorted(ctx.substate):
1434 for subpath in sorted(ctx.substate):
1416 sub = ctx.sub(subpath)
1435 sub = ctx.sub(subpath)
1417 ret = min(ret, sub.outgoing(ui, dests, opts))
1436 ret = min(ret, sub.outgoing(ui, dests, opts))
1418 return ret
1437 return ret
1419
1438
1420
1439
1421 def _outgoing_filter(repo, revs, opts):
1440 def _outgoing_filter(repo, revs, opts):
1422 """apply revision filtering/ordering option for outgoing"""
1441 """apply revision filtering/ordering option for outgoing"""
1423 limit = logcmdutil.getlimit(opts)
1442 limit = logcmdutil.getlimit(opts)
1424 no_merges = opts.get(b'no_merges')
1443 no_merges = opts.get(b'no_merges')
1425 if opts.get(b'newest_first'):
1444 if opts.get(b'newest_first'):
1426 revs.reverse()
1445 revs.reverse()
1427 if limit is None and not no_merges:
1446 if limit is None and not no_merges:
1428 for r in revs:
1447 for r in revs:
1429 yield r
1448 yield r
1430 return
1449 return
1431
1450
1432 count = 0
1451 count = 0
1433 cl = repo.changelog
1452 cl = repo.changelog
1434 for n in revs:
1453 for n in revs:
1435 if limit is not None and count >= limit:
1454 if limit is not None and count >= limit:
1436 break
1455 break
1437 parents = [p for p in cl.parents(n) if p != repo.nullid]
1456 parents = [p for p in cl.parents(n) if p != repo.nullid]
1438 if no_merges and len(parents) == 2:
1457 if no_merges and len(parents) == 2:
1439 continue
1458 continue
1440 count += 1
1459 count += 1
1441 yield n
1460 yield n
1442
1461
1443
1462
1444 def outgoing(ui, repo, dests, opts, subpath=None):
1463 def outgoing(ui, repo, dests, opts, subpath=None):
1445 if opts.get(b'graph'):
1464 if opts.get(b'graph'):
1446 logcmdutil.checkunsupportedgraphflags([], opts)
1465 logcmdutil.checkunsupportedgraphflags([], opts)
1447 o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
1466 o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
1448 ret = 1
1467 ret = 1
1449 try:
1468 try:
1450 if o:
1469 if o:
1451 ret = 0
1470 ret = 0
1452
1471
1453 if opts.get(b'graph'):
1472 if opts.get(b'graph'):
1454 revdag = logcmdutil.graphrevs(repo, o, opts)
1473 revdag = logcmdutil.graphrevs(repo, o, opts)
1455 ui.pager(b'outgoing')
1474 ui.pager(b'outgoing')
1456 displayer = logcmdutil.changesetdisplayer(
1475 displayer = logcmdutil.changesetdisplayer(
1457 ui, repo, opts, buffered=True
1476 ui, repo, opts, buffered=True
1458 )
1477 )
1459 logcmdutil.displaygraph(
1478 logcmdutil.displaygraph(
1460 ui, repo, revdag, displayer, graphmod.asciiedges
1479 ui, repo, revdag, displayer, graphmod.asciiedges
1461 )
1480 )
1462 else:
1481 else:
1463 ui.pager(b'outgoing')
1482 ui.pager(b'outgoing')
1464 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1483 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1465 for n in _outgoing_filter(repo, o, opts):
1484 for n in _outgoing_filter(repo, o, opts):
1466 displayer.show(repo[n])
1485 displayer.show(repo[n])
1467 displayer.close()
1486 displayer.close()
1468 for oth in others:
1487 for oth in others:
1469 cmdutil.outgoinghooks(ui, repo, oth, opts, o)
1488 cmdutil.outgoinghooks(ui, repo, oth, opts, o)
1470 ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
1489 ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
1471 return ret # exit code is zero since we found outgoing changes
1490 return ret # exit code is zero since we found outgoing changes
1472 finally:
1491 finally:
1473 for oth in others:
1492 for oth in others:
1474 oth.close()
1493 oth.close()
1475
1494
1476
1495
1477 def verify(repo, level=None):
1496 def verify(repo, level=None):
1478 """verify the consistency of a repository"""
1497 """verify the consistency of a repository"""
1479 ret = verifymod.verify(repo, level=level)
1498 ret = verifymod.verify(repo, level=level)
1480
1499
1481 # Broken subrepo references in hidden csets don't seem worth worrying about,
1500 # Broken subrepo references in hidden csets don't seem worth worrying about,
1482 # since they can't be pushed/pulled, and --hidden can be used if they are a
1501 # since they can't be pushed/pulled, and --hidden can be used if they are a
1483 # concern.
1502 # concern.
1484
1503
1485 # pathto() is needed for -R case
1504 # pathto() is needed for -R case
1486 revs = repo.revs(
1505 revs = repo.revs(
1487 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1506 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1488 )
1507 )
1489
1508
1490 if revs:
1509 if revs:
1491 repo.ui.status(_(b'checking subrepo links\n'))
1510 repo.ui.status(_(b'checking subrepo links\n'))
1492 for rev in revs:
1511 for rev in revs:
1493 ctx = repo[rev]
1512 ctx = repo[rev]
1494 try:
1513 try:
1495 for subpath in ctx.substate:
1514 for subpath in ctx.substate:
1496 try:
1515 try:
1497 ret = (
1516 ret = (
1498 ctx.sub(subpath, allowcreate=False).verify() or ret
1517 ctx.sub(subpath, allowcreate=False).verify() or ret
1499 )
1518 )
1500 except error.RepoError as e:
1519 except error.RepoError as e:
1501 repo.ui.warn(b'%d: %s\n' % (rev, e))
1520 repo.ui.warn(b'%d: %s\n' % (rev, e))
1502 except Exception:
1521 except Exception:
1503 repo.ui.warn(
1522 repo.ui.warn(
1504 _(b'.hgsubstate is corrupt in revision %s\n')
1523 _(b'.hgsubstate is corrupt in revision %s\n')
1505 % short(ctx.node())
1524 % short(ctx.node())
1506 )
1525 )
1507
1526
1508 return ret
1527 return ret
1509
1528
1510
1529
1511 def remoteui(src, opts):
1530 def remoteui(src, opts):
1512 """build a remote ui from ui or repo and opts"""
1531 """build a remote ui from ui or repo and opts"""
1513 if util.safehasattr(src, b'baseui'): # looks like a repository
1532 if util.safehasattr(src, b'baseui'): # looks like a repository
1514 dst = src.baseui.copy() # drop repo-specific config
1533 dst = src.baseui.copy() # drop repo-specific config
1515 src = src.ui # copy target options from repo
1534 src = src.ui # copy target options from repo
1516 else: # assume it's a global ui object
1535 else: # assume it's a global ui object
1517 dst = src.copy() # keep all global options
1536 dst = src.copy() # keep all global options
1518
1537
1519 # copy ssh-specific options
1538 # copy ssh-specific options
1520 for o in b'ssh', b'remotecmd':
1539 for o in b'ssh', b'remotecmd':
1521 v = opts.get(o) or src.config(b'ui', o)
1540 v = opts.get(o) or src.config(b'ui', o)
1522 if v:
1541 if v:
1523 dst.setconfig(b"ui", o, v, b'copied')
1542 dst.setconfig(b"ui", o, v, b'copied')
1524
1543
1525 # copy bundle-specific options
1544 # copy bundle-specific options
1526 r = src.config(b'bundle', b'mainreporoot')
1545 r = src.config(b'bundle', b'mainreporoot')
1527 if r:
1546 if r:
1528 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1547 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1529
1548
1530 # copy selected local settings to the remote ui
1549 # copy selected local settings to the remote ui
1531 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1550 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1532 for key, val in src.configitems(sect):
1551 for key, val in src.configitems(sect):
1533 dst.setconfig(sect, key, val, b'copied')
1552 dst.setconfig(sect, key, val, b'copied')
1534 v = src.config(b'web', b'cacerts')
1553 v = src.config(b'web', b'cacerts')
1535 if v:
1554 if v:
1536 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1555 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1537
1556
1538 return dst
1557 return dst
1539
1558
1540
1559
1541 # Files of interest
1560 # Files of interest
1542 # Used to check if the repository has changed looking at mtime and size of
1561 # Used to check if the repository has changed looking at mtime and size of
1543 # these files.
1562 # these files.
1544 foi = [
1563 foi = [
1545 (b'spath', b'00changelog.i'),
1564 (b'spath', b'00changelog.i'),
1546 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1565 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1547 (b'spath', b'obsstore'),
1566 (b'spath', b'obsstore'),
1548 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1567 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1549 ]
1568 ]
1550
1569
1551
1570
1552 class cachedlocalrepo:
1571 class cachedlocalrepo:
1553 """Holds a localrepository that can be cached and reused."""
1572 """Holds a localrepository that can be cached and reused."""
1554
1573
1555 def __init__(self, repo):
1574 def __init__(self, repo):
1556 """Create a new cached repo from an existing repo.
1575 """Create a new cached repo from an existing repo.
1557
1576
1558 We assume the passed in repo was recently created. If the
1577 We assume the passed in repo was recently created. If the
1559 repo has changed between when it was created and when it was
1578 repo has changed between when it was created and when it was
1560 turned into a cache, it may not refresh properly.
1579 turned into a cache, it may not refresh properly.
1561 """
1580 """
1562 assert isinstance(repo, localrepo.localrepository)
1581 assert isinstance(repo, localrepo.localrepository)
1563 self._repo = repo
1582 self._repo = repo
1564 self._state, self.mtime = self._repostate()
1583 self._state, self.mtime = self._repostate()
1565 self._filtername = repo.filtername
1584 self._filtername = repo.filtername
1566
1585
1567 def fetch(self):
1586 def fetch(self):
1568 """Refresh (if necessary) and return a repository.
1587 """Refresh (if necessary) and return a repository.
1569
1588
1570 If the cached instance is out of date, it will be recreated
1589 If the cached instance is out of date, it will be recreated
1571 automatically and returned.
1590 automatically and returned.
1572
1591
1573 Returns a tuple of the repo and a boolean indicating whether a new
1592 Returns a tuple of the repo and a boolean indicating whether a new
1574 repo instance was created.
1593 repo instance was created.
1575 """
1594 """
1576 # We compare the mtimes and sizes of some well-known files to
1595 # We compare the mtimes and sizes of some well-known files to
1577 # determine if the repo changed. This is not precise, as mtimes
1596 # determine if the repo changed. This is not precise, as mtimes
1578 # are susceptible to clock skew and imprecise filesystems and
1597 # are susceptible to clock skew and imprecise filesystems and
1579 # file content can change while maintaining the same size.
1598 # file content can change while maintaining the same size.
1580
1599
1581 state, mtime = self._repostate()
1600 state, mtime = self._repostate()
1582 if state == self._state:
1601 if state == self._state:
1583 return self._repo, False
1602 return self._repo, False
1584
1603
1585 repo = repository(self._repo.baseui, self._repo.url())
1604 repo = repository(self._repo.baseui, self._repo.url())
1586 if self._filtername:
1605 if self._filtername:
1587 self._repo = repo.filtered(self._filtername)
1606 self._repo = repo.filtered(self._filtername)
1588 else:
1607 else:
1589 self._repo = repo.unfiltered()
1608 self._repo = repo.unfiltered()
1590 self._state = state
1609 self._state = state
1591 self.mtime = mtime
1610 self.mtime = mtime
1592
1611
1593 return self._repo, True
1612 return self._repo, True
1594
1613
1595 def _repostate(self):
1614 def _repostate(self):
1596 state = []
1615 state = []
1597 maxmtime = -1
1616 maxmtime = -1
1598 for attr, fname in foi:
1617 for attr, fname in foi:
1599 prefix = getattr(self._repo, attr)
1618 prefix = getattr(self._repo, attr)
1600 p = os.path.join(prefix, fname)
1619 p = os.path.join(prefix, fname)
1601 try:
1620 try:
1602 st = os.stat(p)
1621 st = os.stat(p)
1603 except OSError:
1622 except OSError:
1604 st = os.stat(prefix)
1623 st = os.stat(prefix)
1605 state.append((st[stat.ST_MTIME], st.st_size))
1624 state.append((st[stat.ST_MTIME], st.st_size))
1606 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1625 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1607
1626
1608 return tuple(state), maxmtime
1627 return tuple(state), maxmtime
1609
1628
1610 def copy(self):
1629 def copy(self):
1611 """Obtain a copy of this class instance.
1630 """Obtain a copy of this class instance.
1612
1631
1613 A new localrepository instance is obtained. The new instance should be
1632 A new localrepository instance is obtained. The new instance should be
1614 completely independent of the original.
1633 completely independent of the original.
1615 """
1634 """
1616 repo = repository(self._repo.baseui, self._repo.origroot)
1635 repo = repository(self._repo.baseui, self._repo.origroot)
1617 if self._filtername:
1636 if self._filtername:
1618 repo = repo.filtered(self._filtername)
1637 repo = repo.filtered(self._filtername)
1619 else:
1638 else:
1620 repo = repo.unfiltered()
1639 repo = repo.unfiltered()
1621 c = cachedlocalrepo(repo)
1640 c = cachedlocalrepo(repo)
1622 c._state = self._state
1641 c._state = self._state
1623 c.mtime = self.mtime
1642 c.mtime = self.mtime
1624 return c
1643 return c
General Comments 0
You need to be logged in to leave comments. Login now