##// END OF EJS Templates
subrepo: propagate non-default path on outgoing...
Felipe Resende -
r52886:3e0f86f0 stable
parent child Browse files
Show More
@@ -1,1680 +1,1682 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22
22
23 from . import (
23 from . import (
24 bookmarks,
24 bookmarks,
25 bundlerepo,
25 bundlerepo,
26 cmdutil,
26 cmdutil,
27 destutil,
27 destutil,
28 discovery,
28 discovery,
29 error,
29 error,
30 exchange,
30 exchange,
31 extensions,
31 extensions,
32 graphmod,
32 graphmod,
33 httppeer,
33 httppeer,
34 localrepo,
34 localrepo,
35 lock,
35 lock,
36 logcmdutil,
36 logcmdutil,
37 logexchange,
37 logexchange,
38 merge as mergemod,
38 merge as mergemod,
39 mergestate as mergestatemod,
39 mergestate as mergestatemod,
40 narrowspec,
40 narrowspec,
41 phases,
41 phases,
42 requirements,
42 requirements,
43 scmutil,
43 scmutil,
44 sshpeer,
44 sshpeer,
45 statichttprepo,
45 statichttprepo,
46 ui as uimod,
46 ui as uimod,
47 unionrepo,
47 unionrepo,
48 url,
48 url,
49 util,
49 util,
50 verify as verifymod,
50 verify as verifymod,
51 vfs as vfsmod,
51 vfs as vfsmod,
52 )
52 )
53 from .interfaces import repository as repositorymod
53 from .interfaces import repository as repositorymod
54 from .utils import (
54 from .utils import (
55 hashutil,
55 hashutil,
56 stringutil,
56 stringutil,
57 urlutil,
57 urlutil,
58 )
58 )
59
59
60
60
# convenience alias: release one or more locks via lock.release
release = lock.release

# shared features
# names of repo features that can be shared between a share and its source;
# currently only bookmarks sharing is modelled this way
sharedbookmarks = b'bookmarks'
65
65
66
66
def addbranchrevs(lrepo, other, branches, revs, remotehidden=False):
    """Expand branch names into revisions using a peer's branchmap.

    ``branches`` is a ``(hashbranch, branches)`` pair: a single symbol plus
    a list of branch names.  Returns ``(revs, checkout)`` where ``revs`` is
    the augmented revision list and ``checkout`` is its first entry (or
    ``None`` when nothing was requested).
    """
    if hasattr(other, 'peer'):
        # a courtesy to callers using a localrepo for other
        peer = other.peer(remotehidden=remotehidden)
    else:
        peer = other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related requested: pass revs through unchanged
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # server cannot resolve branch names; treat the lone symbol as a
        # plain revision instead
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # expand one branch name into its heads (appended to ``revs``);
        # b'.' means the local dirstate branch
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # an unknown "branch" may still be a valid revision symbol, so
        # fall back to passing it along verbatim
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
113
113
114
114
115 def _isfile(path):
115 def _isfile(path):
116 try:
116 try:
117 # we use os.stat() directly here instead of os.path.isfile()
117 # we use os.stat() directly here instead of os.path.isfile()
118 # because the latter started returning `False` on invalid path
118 # because the latter started returning `False` on invalid path
119 # exceptions starting in 3.8 and we care about handling
119 # exceptions starting in 3.8 and we care about handling
120 # invalid paths specially here.
120 # invalid paths specially here.
121 st = os.stat(path)
121 st = os.stat(path)
122 except ValueError as e:
122 except ValueError as e:
123 msg = stringutil.forcebytestr(e)
123 msg = stringutil.forcebytestr(e)
124 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
124 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
125 except OSError:
125 except OSError:
126 return False
126 return False
127 else:
127 else:
128 return stat.S_ISREG(st.st_mode)
128 return stat.S_ISREG(st.st_mode)
129
129
130
130
class LocalFactory:
    """Thin dispatcher between localrepo and bundlerepo.

    A path that resolves to a regular file is served by bundlerepo;
    anything else is assumed to be a local repository directory.
    """

    @staticmethod
    def islocal(path: bytes) -> bool:
        # bundles (regular files) are not "local" repositories
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        expanded = util.expandpath(urlutil.urllocalpath(path))
        backend = bundlerepo if _isfile(expanded) else localrepo
        return backend.instance(ui, expanded, *args, **kwargs)
147
147
148
148
# repository-providing modules keyed by URL scheme; these schemes always
# resolve to a repository object (bundle, union, or plain local)
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# peer-providing modules keyed by URL scheme, for remote access protocols
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
161
161
162
162
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        # a bytes argument is a path/URL: dispatch on its scheme
        u = urlutil.url(repo)
        scheme = u.scheme or b'file'
        if scheme in peer_schemes:
            cls = peer_schemes[scheme]
            cls.make_peer  # make sure we load the module
        elif scheme in repo_schemes:
            cls = repo_schemes[scheme]
            cls.instance  # make sure we load the module
        else:
            cls = LocalFactory
        if hasattr(cls, 'islocal'):
            return cls.islocal(repo)  # pytype: disable=module-attr
        return False
    # a repo/peer object was passed: deprecated, use obj.local() directly
    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
    return repo.local()
181
181
182
182
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    parsed = urlutil.url(path, parsequery=False, parsefragment=False)
    if not parsed.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(parsed.localpath(), b'rb')
190
190
191
191
# a list of (ui, repo) functions called for wire peer initialization
# (invoked by _setup_repo_or_peer for objects that are not local)
wirepeersetupfuncs = []
194
194
195
195
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run setup hooks on a freshly created repo or peer object.

    ``presetupfuncs`` run first, then each loaded extension's ``reposetup``
    hook; for non-local objects the registered ``wirepeersetupfuncs`` run
    last.  Timing of each hook is logged to the 'extension' log channel.
    """
    # prefer the object's own ui when it carries one
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
214
214
215
215
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """Return a repository object for the specified path.

    Aborts when the path uses a remote (peer-only) scheme.  The result is
    filtered to the 'visible' view after setup hooks have run.
    """
    scheme = urlutil.url(path).scheme
    if scheme is None:
        # schemeless paths are plain filesystem paths
        scheme = b'file'
    factory = repo_schemes.get(scheme)
    if factory is None:
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        factory = LocalFactory
    repo = factory.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    return repo.filtered(b'visible')
242
242
243
243
def peer(
    uiorrepo,
    opts,
    path,
    create=False,
    intents=None,
    createopts=None,
    remotehidden=False,
):
    '''return a repository peer for the specified path

    ``path`` may be a raw location (bytes) or an already-parsed
    ``urlutil.path`` object.  Remote schemes get a true wire peer from the
    matching module's ``make_peer``; everything else is opened as a local
    repository and wrapped via ``repo.peer()``.
    '''
    ui = getattr(uiorrepo, 'ui', uiorrepo)
    rui = remoteui(uiorrepo, opts)
    if hasattr(path, 'url'):
        # this is already a urlutil.path object
        peer_path = path
    else:
        peer_path = urlutil.path(ui, None, rawloc=path, validate_path=False)
    scheme = peer_path.url.scheme  # pytype: disable=attribute-error
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
        peer = cls.make_peer(
            rui,
            peer_path,
            create,
            intents=intents,
            createopts=createopts,
            remotehidden=remotehidden,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo_path = peer_path.loc  # pytype: disable=attribute-error
        if not repo_path:
            repo_path = peer_path.rawloc  # pytype: disable=attribute-error
        repo = repository(
            rui,
            repo_path,
            create,
            intents=intents,
            createopts=createopts,
        )
        peer = repo.peer(path=peer_path, remotehidden=remotehidden)
    return peer
287
287
288
288
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    # the last path component of the source URL; an empty path (e.g. a
    # bare host or empty source) yields an empty destination
    urlpath = urlutil.url(source).path
    return os.path.basename(os.path.normpath(urlpath)) if urlpath else b''
309
309
310
310
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.  The source repo is
    cached on ``repo.srcrepo`` after the first lookup.
    """
    if repo.sharedpath == repo.path:
        # not a share at all
        return None

    cached = getattr(repo, 'srcrepo', None)
    if cached:
        return cached

    # the sharedpath always ends in the .hg; we want the path to the repo
    base = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(base)
    src = repository(repo.ui, srcurl)
    repo.srcrepo = src
    return src
328
328
329
329
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a local repo/peer object or a bytes path.  Creates a
    new repository at ``dest`` sharing the source's store, optionally
    sharing bookmarks and updating the working directory afterwards.
    Returns the new repository object.
    '''

    not_local_msg = _(b'can only share local repositories')
    if hasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        # bytes source: parse it and resolve any branch to a checkout rev
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the freshly written share configuration takes effect
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
382
382
383
383
def _prependsourcehgrc(repo):
    """Prepend the share source's config to this repo's .hg/hgrc.

    Run on unshare, and only meaningful when the share was performed with
    the share-safe method (where the source's config was shared).
    """
    sharedvfs = vfsmod.vfs(repo.sharedpath)
    localvfs = vfsmod.vfs(repo.path)

    if not sharedvfs.exists(b'hgrc'):
        # nothing to carry over from the share source
        return

    existing = localvfs.read(b'hgrc') if localvfs.exists(b'hgrc') else b''

    with localvfs(b'hgrc', b'wb') as fp:
        copied = sharedvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(copied)
        fp.write(b'\n')
        fp.write(existing)
404
404
405
405
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around for forensics, but stop using it
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
452
452
453
453
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    fallback = sourcerepo.ui.config(b'paths', b'default')
    default = defaultpath or fallback
    if default:
        content = util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        destrepo.vfs.write(b'hgrc', content)
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock(), destrepo.lock(), destrepo.transaction(
            b"narrow-share"
        ):
            narrowspec.copytoworkingcopy(destrepo)
472
472
473
473
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # a specific revision was requested; it overrides any checkout hint
        checkout = update
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
494
494
495
495
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock

    Files are hardlinked when possible (util.copyfiles decides and reports
    back via its first return value).  The returned lock, if any, protects
    the destination store and must be released by the caller.
    """
    destlock = None
    try:
        # hardlink starts unknown; util.copyfiles updates it, so the
        # initial progress topic is b'copying'
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # a publishing repo treats everything public; its
                # phaseroots must not be propagated
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # never leak the destination lock on failure
        release(destlock)
        raise
534
534
535
535
536 def clonewithshare(
536 def clonewithshare(
537 ui,
537 ui,
538 peeropts,
538 peeropts,
539 sharepath,
539 sharepath,
540 source,
540 source,
541 srcpeer,
541 srcpeer,
542 dest,
542 dest,
543 pull=False,
543 pull=False,
544 rev=None,
544 rev=None,
545 update=True,
545 update=True,
546 stream=False,
546 stream=False,
547 ):
547 ):
548 """Perform a clone using a shared repo.
548 """Perform a clone using a shared repo.
549
549
550 The store for the repository will be located at <sharepath>/.hg. The
550 The store for the repository will be located at <sharepath>/.hg. The
551 specified revisions will be cloned or pulled from "source". A shared repo
551 specified revisions will be cloned or pulled from "source". A shared repo
552 will be created at "dest" and a working copy will be created if "update" is
552 will be created at "dest" and a working copy will be created if "update" is
553 True.
553 True.
554 """
554 """
555 revs = None
555 revs = None
556 if rev:
556 if rev:
557 if not srcpeer.capable(b'lookup'):
557 if not srcpeer.capable(b'lookup'):
558 raise error.Abort(
558 raise error.Abort(
559 _(
559 _(
560 b"src repository does not support "
560 b"src repository does not support "
561 b"revision lookup and so doesn't "
561 b"revision lookup and so doesn't "
562 b"support clone by revision"
562 b"support clone by revision"
563 )
563 )
564 )
564 )
565
565
566 # TODO this is batchable.
566 # TODO this is batchable.
567 remoterevs = []
567 remoterevs = []
568 for r in rev:
568 for r in rev:
569 with srcpeer.commandexecutor() as e:
569 with srcpeer.commandexecutor() as e:
570 remoterevs.append(
570 remoterevs.append(
571 e.callcommand(
571 e.callcommand(
572 b'lookup',
572 b'lookup',
573 {
573 {
574 b'key': r,
574 b'key': r,
575 },
575 },
576 ).result()
576 ).result()
577 )
577 )
578 revs = remoterevs
578 revs = remoterevs
579
579
580 # Obtain a lock before checking for or cloning the pooled repo otherwise
580 # Obtain a lock before checking for or cloning the pooled repo otherwise
581 # 2 clients may race creating or populating it.
581 # 2 clients may race creating or populating it.
582 pooldir = os.path.dirname(sharepath)
582 pooldir = os.path.dirname(sharepath)
583 # lock class requires the directory to exist.
583 # lock class requires the directory to exist.
584 try:
584 try:
585 util.makedir(pooldir, False)
585 util.makedir(pooldir, False)
586 except FileExistsError:
586 except FileExistsError:
587 pass
587 pass
588
588
589 poolvfs = vfsmod.vfs(pooldir)
589 poolvfs = vfsmod.vfs(pooldir)
590 basename = os.path.basename(sharepath)
590 basename = os.path.basename(sharepath)
591
591
592 with lock.lock(poolvfs, b'%s.lock' % basename):
592 with lock.lock(poolvfs, b'%s.lock' % basename):
593 if os.path.exists(sharepath):
593 if os.path.exists(sharepath):
594 ui.status(
594 ui.status(
595 _(b'(sharing from existing pooled repository %s)\n') % basename
595 _(b'(sharing from existing pooled repository %s)\n') % basename
596 )
596 )
597 else:
597 else:
598 ui.status(
598 ui.status(
599 _(b'(sharing from new pooled repository %s)\n') % basename
599 _(b'(sharing from new pooled repository %s)\n') % basename
600 )
600 )
601 # Always use pull mode because hardlinks in share mode don't work
601 # Always use pull mode because hardlinks in share mode don't work
602 # well. Never update because working copies aren't necessary in
602 # well. Never update because working copies aren't necessary in
603 # share mode.
603 # share mode.
604 clone(
604 clone(
605 ui,
605 ui,
606 peeropts,
606 peeropts,
607 source,
607 source,
608 dest=sharepath,
608 dest=sharepath,
609 pull=True,
609 pull=True,
610 revs=rev,
610 revs=rev,
611 update=False,
611 update=False,
612 stream=stream,
612 stream=stream,
613 )
613 )
614
614
615 # Resolve the value to put in [paths] section for the source.
615 # Resolve the value to put in [paths] section for the source.
616 if islocal(source):
616 if islocal(source):
617 defaultpath = util.abspath(urlutil.urllocalpath(source))
617 defaultpath = util.abspath(urlutil.urllocalpath(source))
618 else:
618 else:
619 defaultpath = source
619 defaultpath = source
620
620
621 sharerepo = repository(ui, path=sharepath)
621 sharerepo = repository(ui, path=sharepath)
622 destrepo = share(
622 destrepo = share(
623 ui,
623 ui,
624 sharerepo,
624 sharerepo,
625 dest=dest,
625 dest=dest,
626 update=False,
626 update=False,
627 bookmarks=False,
627 bookmarks=False,
628 defaultpath=defaultpath,
628 defaultpath=defaultpath,
629 )
629 )
630
630
631 # We need to perform a pull against the dest repo to fetch bookmarks
631 # We need to perform a pull against the dest repo to fetch bookmarks
632 # and other non-store data that isn't shared by default. In the case of
632 # and other non-store data that isn't shared by default. In the case of
633 # non-existing shared repo, this means we pull from the remote twice. This
633 # non-existing shared repo, this means we pull from the remote twice. This
634 # is a bit weird. But at the time it was implemented, there wasn't an easy
634 # is a bit weird. But at the time it was implemented, there wasn't an easy
635 # way to pull just non-changegroup data.
635 # way to pull just non-changegroup data.
636 exchange.pull(destrepo, srcpeer, heads=revs)
636 exchange.pull(destrepo, srcpeer, heads=revs)
637
637
638 _postshareupdate(destrepo, update)
638 _postshareupdate(destrepo, update)
639
639
640 return srcpeer, peer(ui, peeropts, dest)
640 return srcpeer, peer(ui, peeropts, dest)
641
641
642
642
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    # Nothing to do when the source cache file is absent.
    cachefile = srcrepo.cachevfs.join(fname)
    if not os.path.exists(cachefile):
        return
    # Lazily create the destination cache directory on first copy.
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(cachefile, os.path.join(dstcachedir, fname))
652
652
653
653
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src_path = urlutil.get_clone_path_obj(ui, source)
        if src_path is None:
            srcpeer = peer(ui, peeropts, b'')
            origsource = source = b''
            branches = (None, branch or [])
        else:
            srcpeer = peer(ui, peeropts, src_path)
            origsource = src_path.rawloc
            branches = (src_path.branch, branch or [])
            source = src_path.loc
    else:
        if hasattr(source, 'peer'):
            srcpeer = source.peer()  # in case we were called with a localrepo
        else:
            srcpeer = source
        branches = (None, branch or [])
        # XXX path: simply use the peer `path` object when this become available
        origsource = source = srcpeer.url()
    srclock = destlock = destwlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest_path = urlutil.get_clone_path_obj(ui, dest)
            if dest_path is not None:
                dest = dest_path.rawloc
            else:
                dest = b''

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if b'lfs' in extensions.disabled():
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)

            destwlock = destrepo.wlock()
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)

            # make the peer aware that is it already locked
            #
            # important:
            #
            # We still need to release that lock at the end of the function
            destpeer.local()._lockref = weakref.ref(destlock)
            destpeer.local()._wlockref = weakref.ref(destwlock)
            # dirstate also needs to be copied because `_wlockref` has a reference
            # to it: this dirstate is saved to disk when the wlock is released
            destpeer.local().dirstate = destrepo.dirstate

            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except FileExistsError:
                cleandir = None
                raise error.Abort(_(b"destination '%s' already exists") % dest)

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock(), local.transaction(
                        b'narrow-clone'
                    ):
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        heads=revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # Translate the message first, then interpolate:
                            # interpolating inside _() builds a msgid that can
                            # never match the gettext catalog.
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                # release the working-copy lock taken in the local-copy
                # branch (this previously released destlock a second time,
                # leaving destwlock held until the finally clause)
                release(destwlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1134
1134
1135
1135
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of merge/update *stats* on the repo ui.

    With ``quietempty`` set, an all-zero result prints nothing.
    """
    if quietempty and stats.isempty():
        return
    summary = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    repo.ui.status(summary % counts)
1151
1151
1152
1152
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # This entry point is deprecated; it only forwards to merge._update().
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1171
1171
1172
1172
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return has_conflicts


# naming conflict in clone()
_update = update
1184
1184
1185
1185
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = mergemod.clean_update(repo[node])
    # a clean update can never leave unresolved files behind
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False


# naming conflict in updatetotally()
_clean = clean
1197
1197
# Accepted values for the `commands.update.check` config option and for the
# `updatecheck` argument of updatetotally() below.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1204
1204
1205
1205
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # An explicit caller-supplied value, by contrast, must be valid.
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly a
            # bookmark to move along with the update)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out now on a dirty working directory, then update
                # without further checking (nothing left to merge)
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination was named by a bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination named by something else: drop the active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1284
1284
1285
1285
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        hint = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(hint)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return has_conflicts
1307
1307
1308
1308
def abortmerge(ui, repo):
    """Abandon an in-progress merge and restore the pre-merge parent."""
    ms = mergestatemod.mergestate.read(repo)
    if not ms.active():
        # there were no conficts, mergestate was not stored
        node = repo[b'.'].hex()
    else:
        # there were conflicts
        node = ms.localctx.hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1322
1322
1323
1323
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the result of
    ``subreporecurse()``.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            # an absolute subrepo source replaces the parent path entirely
            peer_path = url = bytes(subpath)
        else:
            # relative subrepo source: join onto the parent path, using the
            # path flavor (local vs URL) to pick the right normalization
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    # cleanupfn may be replaced below by the bundle-repo cleanup callback
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1390
1390
1391
1391
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``.

    Returns 0 when incoming changes exist, 1 otherwise (possibly lowered by
    subrepo results when --subrepos is in effect).
    """

    def subreporecurse():
        # visit each subrepo in deterministic order; keep the best (lowest)
        # exit code seen
        result = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for sub_path in sorted(wctx.substate):
                sub = wctx.sub(sub_path)
                result = min(result, sub.incoming(ui, source, opts))
        return result

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1421
1421
1422
1422
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the changesets missing from each push destination.

    Returns a pair ``(outgoing_revs, others)`` where ``outgoing_revs`` is the
    union of missing nodes across all destinations, sorted by revision
    number, and ``others`` is the list of still-open peers (the caller is
    responsible for closing them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        # record the destination before any subrepo-relative adjustment so
        # nested subrepos inherit the non-default path; NOTE(review): the
        # matching `del repo._subtoppath` lives in outgoing() — confirm no
        # other caller of _outgoing() relies on this attribute lingering
        repo._subtoppath = dest
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                # absolute subrepo source replaces the parent path entirely
                dest = bytes(subpath)
            else:
                # relative subrepo source: join onto the parent path, picking
                # the normalization that matches the path flavor
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            # only close the peer on failure; surviving peers are returned
            # to the caller, which must close them
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1465
1466
1466
1467
1467 def _outgoing_recurse(ui, repo, dests, opts):
1468 def _outgoing_recurse(ui, repo, dests, opts):
1468 ret = 1
1469 ret = 1
1469 if opts.get(b'subrepos'):
1470 if opts.get(b'subrepos'):
1470 ctx = repo[None]
1471 ctx = repo[None]
1471 for subpath in sorted(ctx.substate):
1472 for subpath in sorted(ctx.substate):
1472 sub = ctx.sub(subpath)
1473 sub = ctx.sub(subpath)
1473 ret = min(ret, sub.outgoing(ui, dests, opts))
1474 ret = min(ret, sub.outgoing(ui, dests, opts))
1474 return ret
1475 return ret
1475
1476
1476
1477
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    skip_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not skip_merges:
        # nothing to filter: stream everything through unchanged
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if skip_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1498
1499
1499
1500
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the given destination(s).

    Returns 0 when outgoing changes exist, 1 otherwise (possibly lowered by
    subrepo results when --subrepos is in effect).
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

        if opts.get(b'graph'):
            revdag = logcmdutil.graphrevs(repo, o, opts)
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(
                ui, repo, opts, buffered=True
            )
            logcmdutil.displaygraph(
                ui, repo, revdag, displayer, graphmod.asciiedges
            )
        else:
            ui.pager(b'outgoing')
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            for n in _outgoing_filter(repo, o, opts):
                displayer.show(repo[n])
            displayer.close()
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        # recurse into subrepos (no-op unless --subrepos was given)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # close the peers opened by _outgoing() ...
        for oth in others:
            oth.close()
        # ... and drop the _subtoppath attribute that _outgoing() set on the
        # repo, so it does not leak past this command
        del repo._subtoppath
1531
1533
1532
1534
def verify(repo, level=None):
    """verify the consistency of a repository

    After the core verify pass, additionally verifies every subrepo
    referenced by a visible ``.hgsubstate`` revision.  Returns the combined
    result of the core verify and the subrepo checks.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # keep any previous failure sticky via `or ret`
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        # a single broken subrepo should not stop the scan
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading substate itself blew up: the .hgsubstate file in
                # this revision cannot be parsed
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1565
1567
1566
1568
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if hasattr(src, 'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options; command-line opts win over config
    for key in (b'ssh', b'remotecmd'):
        val = opts.get(key) or src.config(b'ui', key)
        if val:
            dst.setconfig(b"ui", key, val, b'copied')

    # copy bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1595
1597
1596
1598
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.  Each entry is (repo attribute holding the directory, file
# name inside it); consumed by cachedlocalrepo._repostate() below.
foi = [
    ('spath', b'00changelog.i'),
    ('spath', b'phaseroots'),  # ! phase can change content at the same size
    ('spath', b'obsstore'),
    ('path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1606
1608
1607
1609
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) pairs for the files of interest, plus the
        # newest mtime seen; fetch() compares against this to detect staleness
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # stale: build a fresh repository and re-apply the original filter
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        # Collect (mtime, size) for each file of interest (see `foi` at module
        # level), falling back to the containing directory's stat when the
        # file does not exist.  Returns (tuple_of_pairs, max_mtime).
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the staleness snapshot so the copy does not spuriously refresh
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now