##// END OF EJS Templates
path: directly use the push_variant in outgoing internals...
marmoute -
r50596:1470a533 default
parent child Browse files
Show More
@@ -1,1636 +1,1636 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
# convenience alias for releasing one or more locks
release = lock.release

# shared features
sharedbookmarks = b'bookmarks'
67
67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against ``other`` and fold them into ``revs``.

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by URL
    parsing. Returns a ``(revs, checkout)`` pair where ``checkout`` is the
    first resolved revision (or ``None`` when nothing was requested).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # b'.' means the branch of the local dirstate; it needs lrepo
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # an unknown hashbranch may still be a raw revision hash
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
110
110
111
111
112 def _isfile(path):
112 def _isfile(path):
113 try:
113 try:
114 # we use os.stat() directly here instead of os.path.isfile()
114 # we use os.stat() directly here instead of os.path.isfile()
115 # because the latter started returning `False` on invalid path
115 # because the latter started returning `False` on invalid path
116 # exceptions starting in 3.8 and we care about handling
116 # exceptions starting in 3.8 and we care about handling
117 # invalid paths specially here.
117 # invalid paths specially here.
118 st = os.stat(path)
118 st = os.stat(path)
119 except ValueError as e:
119 except ValueError as e:
120 msg = stringutil.forcebytestr(e)
120 msg = stringutil.forcebytestr(e)
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
121 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
122 except OSError:
122 except OSError:
123 return False
123 return False
124 else:
124 else:
125 return stat.S_ISREG(st.st_mode)
125 return stat.S_ISREG(st.st_mode)
126
126
127
127
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a plain file on disk is a bundle, which is not "local repo"-like
        path = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(path)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        path = util.expandpath(urlutil.urllocalpath(path))
        if _isfile(path):
            cls = bundlerepo
        else:
            cls = localrepo
        return cls.instance(ui, path, *args, **kwargs)
144
144
145
145
# URL scheme -> module/factory providing a local repository class
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL scheme -> module providing a remote peer class
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
158
158
159
159
def _peerlookup(path):
    """Return the peer/repo factory module for ``path``'s URL scheme.

    Falls back to ``LocalFactory`` for unknown schemes.
    """
    u = urlutil.url(path)
    scheme = u.scheme or b'file'
    if scheme in peer_schemes:
        return peer_schemes[scheme]
    if scheme in repo_schemes:
        return repo_schemes[scheme]
    return LocalFactory
168
168
169
169
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        cls = _peerlookup(repo)
        cls.instance  # make sure we load the module
        if util.safehasattr(cls, 'islocal'):
            return cls.islocal(repo)  # pytype: disable=module-attr
        return False
    # passing a repo object here is deprecated; callers should use .local()
    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
    return repo.local()
180
180
181
181
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), b'rb')
    else:
        return url.open(ui, path, sendaccept=sendaccept)
189
189
190
190
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
193
193
194
194
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run pre-setup callbacks and extension reposetup hooks on ``obj``.

    ``obj`` is a freshly created repository or peer; wire peers additionally
    get the registered ``wirepeersetupfuncs`` applied.
    """
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
213
213
214
214
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    cls = repo_schemes.get(scheme)
    if cls is None:
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        cls = LocalFactory
    repo = cls.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    # hand back the "visible" (obsolescence-filtered) view of the repo
    return repo.filtered(b'visible')
241
241
242
242
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    scheme = urlutil.url(path).scheme
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
        peer = cls.instance(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo = repository(
            rui,
            path,
            create,
            intents=intents,
            createopts=createopts,
        )
        peer = repo.peer()
    return peer
268
268
269
269
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if not path:
        return b''
    return os.path.basename(os.path.normpath(path))
290
290
291
291
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    # cached on a previous call
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
309
309
310
310
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the new repo is instantiated with its final requirements
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
362
362
363
363
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
384
384
385
385
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
432
432
433
433
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
450
450
451
451
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # fall back through sensible default targets until one resolves
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
472
472
473
473
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
512
512
513
513
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
619
619
620
620
621 # Recomputing caches is often slow on big repos, so copy them.
621 # Recomputing caches is often slow on big repos, so copy them.
622 def _copycache(srcrepo, dstcachedir, fname):
622 def _copycache(srcrepo, dstcachedir, fname):
623 """copy a cache from srcrepo to destcachedir (if it exists)"""
623 """copy a cache from srcrepo to destcachedir (if it exists)"""
624 srcfname = srcrepo.cachevfs.join(fname)
624 srcfname = srcrepo.cachevfs.join(fname)
625 dstfname = os.path.join(dstcachedir, fname)
625 dstfname = os.path.join(dstcachedir, fname)
626 if os.path.exists(srcfname):
626 if os.path.exists(srcfname):
627 if not os.path.exists(dstcachedir):
627 if not os.path.exists(dstcachedir):
628 os.mkdir(dstcachedir)
628 os.mkdir(dstcachedir)
629 util.copyfile(srcfname, dstfname)
629 util.copyfile(srcfname, dstfname)
630
630
631
631
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src = urlutil.get_clone_path(ui, source, branch)
        origsource, source, branches = src
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = destwlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = urlutil.get_clone_path(ui, dest)[0]

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)

            destwlock = destrepo.wlock()
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)

            # make the peer aware that is it already locked
            #
            # important:
            #
            # We still need to release that lock at the end of the function
            destpeer.local()._lockref = weakref.ref(destlock)
            destpeer.local()._wlockref = weakref.ref(destwlock)
            # dirstate also needs to be copied because `_wlockref` has a reference
            # to it: this dirstate is saved to disk when the wlock is released
            destpeer.local().dirstate = destrepo.dirstate

            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except FileExistsError:
                cleandir = None
                raise error.Abort(_(b"destination '%s' already exists") % dest)

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        heads=revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # fix: translate the message template first, then
                            # interpolate; interpolating inside _() defeats the
                            # catalog lookup for any non-default bookmark name.
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                # fix: this used to release `destlock` a second time while
                # leaving `destwlock` held until the finally clause below.
                release(destwlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1095
1095
1096
1096
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge *stats* via repo.ui.

    With quietempty=True, nothing is printed when stats is empty.
    """
    if quietempty and stats.isempty():
        return
    template = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    repo.ui.status(template % counts)
1112
1112
1113
1113
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    Deprecated shim: emits a deprecation warning and forwards to
    merge._update(). When overwrite is set, changes are clobbered,
    merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    forwarded = dict(
        branchmerge=False,
        force=overwrite,
        labels=[b'working copy', b'destination'],
        updatecheck=updatecheck,
    )
    return mergemod._update(repo, node, **forwarded)
1132
1132
1133
1133
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node

    Returns True when unresolved merge conflicts remain.
    """
    result = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, result, quietempty)
    unresolved = result.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1141
1141
1142
1142
# naming conflict in clone(): its `update` argument shadows the module-level
# update() function, so keep this alias reachable under another name.
_update = update
1145
1145
1146
1146
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    result = mergemod.clean_update(repo[node])
    # an overwriting update can never leave merge conflicts behind
    assert result.unresolvedcount == 0
    if show_stats:
        _showstats(repo, result, quietempty)
    return False
1154
1154
1155
1155
1156 # naming conflict in updatetotally()
1156 # naming conflict in updatetotally()
1157 _clean = clean
1157 _clean = clean
1158
1158
# Accepted values for the ``updatecheck`` argument of updatetotally() /
# the ``commands.update.check`` config option; anything outside this set
# is replaced by the LINEAR default in updatetotally().
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1165
1165
1166
1166
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
              changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit (non-None) argument must be one of the known values
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # a bookmark to move and a name to (re)activate afterwards)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out early so the actual update can run unchecked
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # updating away from a bookmark deactivates it
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

        return ret
1245
1245
1246
1246
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch-merge the working directory with ``ctx``, resolving changes.

    Returns True when unresolved conflicts remain, False otherwise.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        conflict_hint = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(conflict_hint)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return has_conflicts
1268
1268
1269
1269
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating the working copy back."""
    ms = mergestatemod.mergestate.read(repo)
    # With an active merge state (i.e. there were conflicts) the local side
    # recorded in it is where we go back to; otherwise no merge state was
    # stored and '.' is the target.
    target = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % target[:12])
    stats = mergemod.clean_update(repo[target])
    # a clean (overwriting) update cannot leave conflicts behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1283
1283
1284
1284
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed, otherwise
    the value of ``subreporecurse()``.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        # graft the subrepo path onto the parent source URL (unless the
        # subrepo source is already absolute)
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            p = urlutil.url(source)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            source = bytes(p)
    other = peer(repo, opts, source)
    # getremotechanges() below may replace this with a wider cleanup that
    # also removes any temporary bundle it created
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1346
1346
1347
1347
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets in ``source`` that are not in the local repo.

    Returns 0 when incoming changes were found (see _incoming).
    """

    def subreporecurse():
        # recurse into subrepos when --subrepos was given; 0 from any
        # subrepo (changes found) wins over 1
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # honor --limit / --newest-first / --no-merges while displaying
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1377
1377
1378
1378
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the nodes missing from every destination in ``dests``.

    Returns a pair ``(outgoing_revs, others)`` where ``outgoing_revs`` is
    the union of missing nodes sorted by local revision number and
    ``others`` the list of (still open) peers; callers must close them.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        # path.loc already is the push variant of the path here
        # (NOTE(review): presumably resolved by get_push_paths — confirm)
        dest = path.loc
        if subpath is not None:
            # graft the subrepo path onto the destination URL (unless the
            # subrepo destination is already absolute)
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # peer is kept open on success; callers close it later
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1421
1421
1422
1422
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on the working copy's subrepos when --subrepos is set.

    Returns 0 when any subrepo reported outgoing changes, else 1.
    """
    result = 1
    if not opts.get(b'subrepos'):
        return result
    wctx = repo[None]
    for sub_path in sorted(wctx.substate):
        subrepo = wctx.sub(sub_path)
        result = min(result, subrepo.outgoing(ui, dests, opts))
    return result
1431
1431
1432
1432
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing

    Generator honoring --limit, --newest-first and --no-merges.  Note that
    --newest-first reverses ``revs`` in place.
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter: pass everything through
        yield from revs
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1454
1454
1455
1455
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the specified destinations.

    Returns 0 when outgoing changes were found (here or in a subrepo),
    else 1.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # hooks run (and subrepos recurse) even when nothing was outgoing
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero when we found outgoing changes
    finally:
        # the peers opened by _outgoing() are ours to close
        for oth in others:
            oth.close()
1487
1487
1488
1488
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies subrepos referenced from .hgsubstate in visible
    revisions.  Returns a non-zero value when problems were found.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # keep any previous non-zero result sticky
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading .hgsubstate itself blew up: report, keep going
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1521
1521
1522
1522
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # ``src`` looks like a repository: start from its base ui so
        # repo-specific configuration is dropped, but keep reading the
        # options to copy from the repo-level ui
        remote_ui = src.baseui.copy()
        src = src.ui
    else:
        # assume ``src`` already is a global ui object
        remote_ui = src.copy()

    # carry over ssh-specific options
    for option in (b'ssh', b'remotecmd'):
        value = opts.get(option) or src.config(b'ui', option)
        if value:
            remote_ui.setconfig(b"ui", option, value, b'copied')

    # carry over bundle-specific options
    main_root = src.config(b'bundle', b'mainreporoot')
    if main_root:
        remote_ui.setconfig(b'bundle', b'mainreporoot', main_root, b'copied')

    # copy selected local settings to the remote ui
    for section in (
        b'auth',
        b'hostfingerprints',
        b'hostsecurity',
        b'http_proxy',
    ):
        for key, val in src.configitems(section):
            remote_ui.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        remote_ui.setconfig(
            b'web', b'cacerts', util.expandpath(cacerts), b'copied'
        )

    return remote_ui
1551
1551
1552
1552
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.  Consumed by cachedlocalrepo._repostate(); each entry is a
# (repo attribute holding the base directory, file name) pair.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1562
1562
1563
1563
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) pairs for the files of interest and the
        # newest mtime seen; fetch() compares against this to detect change
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        # re-apply the filter level of the previous instance
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        # Return (state, maxmtime): ``state`` is a tuple of (mtime, size)
        # pairs, one per entry in the module-level ``foi`` list.
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # the file may not exist; fall back to its directory so we
                # still record a usable timestamp
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the cached staleness snapshot with the copy
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now