# Changeset r50649 (branch: default)
# peer: build a `path` object on the fly when needed
# Author: marmoute
# Diff context: @@ -1,1665 +1,1668 @@ of mercurial/hg.py
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import os
import posixpath
import shutil
import stat
import weakref

from .i18n import _
from .node import (
    hex,
    sha1nodeconstants,
    short,
)
from .pycompat import getattr

from . import (
    bookmarks,
    bundlerepo,
    cmdutil,
    destutil,
    discovery,
    error,
    exchange,
    extensions,
    graphmod,
    httppeer,
    localrepo,
    lock,
    logcmdutil,
    logexchange,
    merge as mergemod,
    mergestate as mergestatemod,
    narrowspec,
    phases,
    requirements,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
    vfs as vfsmod,
)
from .interfaces import repository as repositorymod
from .utils import (
    hashutil,
    stringutil,
    urlutil,
)
60
60
61
61
# convenience alias used by the error-recovery paths below (e.g. copystore)
release = lock.release

# shared features
sharedbookmarks = b'bookmarks'
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names on a peer into concrete revisions.

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by URL
    parsing.  Returns a ``(revs, checkout)`` pair where ``revs`` is the
    (possibly extended) list of revisions and ``checkout`` is the revision
    to update the working directory to (or None).
    """
    if util.safehasattr(other, 'peer'):
        # a courtesy to callers using a localrepo for other
        peer = other.peer()
    else:
        peer = other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing to resolve; pass through the requested revs untouched
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # old servers cannot resolve branch names remotely; treat the
        # hash/branch fragment as an opaque revision instead
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # extend ``revs`` with the heads of ``branch``; return whether the
        # branch was known to the remote
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a branch name after all; treat it as a raw revision
            revs.append(hashbranch)
    return revs, revs[0]
114
114
115
115
def _isfile(path):
    """Return True if ``path`` names a regular file.

    Raises ``error.Abort`` on paths the OS rejects outright (e.g. embedded
    NUL bytes), rather than silently reporting False.
    """
    try:
        # we use os.stat() directly here instead of os.path.isfile()
        # because the latter started returning `False` on invalid path
        # exceptions starting in 3.8 and we care about handling
        # invalid paths specially here.
        st = os.stat(path)
    except ValueError as e:
        msg = stringutil.forcebytestr(e)
        raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
    except OSError:
        return False
    else:
        return stat.S_ISREG(st.st_mode)
130
130
131
131
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # bundle files look like plain files on disk; everything else local
        # is a repository directory
        path = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(path)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        """Instantiate a bundlerepo (for a file path) or localrepo."""
        path = util.expandpath(urlutil.urllocalpath(path))
        if _isfile(path):
            cls = bundlerepo
        else:
            cls = localrepo
        return cls.instance(ui, path, *args, **kwargs)
148
148
149
149
# URL scheme -> module/factory providing ``instance`` for local-ish repos
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# URL scheme -> module providing ``make_peer`` for remote peers
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
162
162
163
163
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        u = urlutil.url(repo)
        scheme = u.scheme or b'file'
        if scheme in peer_schemes:
            cls = peer_schemes[scheme]
            cls.make_peer  # make sure we load the module
        elif scheme in repo_schemes:
            cls = repo_schemes[scheme]
            cls.instance  # make sure we load the module
        else:
            cls = LocalFactory
        if util.safehasattr(cls, 'islocal'):
            return cls.islocal(repo)  # pytype: disable=module-attr
        return False
    # passing a repo object here is deprecated; callers should ask the
    # object itself
    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
    return repo.local()
182
182
183
183
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        # binary mode: callers expect raw bytes regardless of platform
        return util.posixfile(pathurl.localpath(), b'rb')
    else:
        return url.open(ui, path, sendaccept=sendaccept)
191
191
192
192
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
195
195
196
196
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run extension ``reposetup`` hooks (and wire-peer setup) on ``obj``.

    ``presetupfuncs`` are caller-supplied callables run before any extension
    hook.  ``obj`` may be a repository or a peer; wire-peer setup functions
    are only applied when ``obj.local()`` is falsy.
    """
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b'  > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
215
215
216
216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        scheme = b'file'
    cls = repo_schemes.get(scheme)
    if cls is None:
        if scheme in peer_schemes:
            # a remote peer scheme cannot yield a full repository object
            raise error.Abort(_(b"repository '%s' is not local") % path)
        cls = LocalFactory
    repo = cls.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    # hand callers the "visible" view so hidden changesets stay hidden
    return repo.filtered(b'visible')
243
243
244
244
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    ui = getattr(uiorrepo, 'ui', uiorrepo)
    rui = remoteui(uiorrepo, opts)
    if util.safehasattr(path, 'url'):
        # this is already a urlutil.path object
        peer_path = path
    else:
        # build a `path` object on the fly when only bytes were given;
        # validation is skipped since the raw location may not be a
        # configured path
        peer_path = urlutil.path(ui, None, rawloc=path, validate_path=False)
    scheme = peer_path.url.scheme  # pytype: disable=attribute-error
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
        peer = cls.make_peer(
            rui,
            peer_path.loc,
            create,
            intents=intents,
            createopts=createopts,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo_path = peer_path.loc  # pytype: disable=attribute-error
        if not repo_path:
            # fall back to the unexpanded location when `loc` is empty
            repo_path = peer_path.rawloc  # pytype: disable=attribute-error
        repo = repository(
            rui,
            repo_path,
            create,
            intents=intents,
            createopts=createopts,
        )
        peer = repo.peer()
    return peer
276
279
277
280
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if not path:
        return b''
    return os.path.basename(os.path.normpath(path))
298
301
299
302
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        # cached on a previous call
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
317
320
318
321
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the new repo picks up the shared configuration
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
371
374
372
375
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        # local config wins: it comes last and overrides the copied values
        fp.write(currentconfig)
393
396
394
397
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
441
444
442
445
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
459
462
460
463
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the requested revision first, then fall back to sane defaults
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
481
484
482
485
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # publishing repos have no meaningful phase data to copy
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
521
524
522
525
523 def clonewithshare(
526 def clonewithshare(
524 ui,
527 ui,
525 peeropts,
528 peeropts,
526 sharepath,
529 sharepath,
527 source,
530 source,
528 srcpeer,
531 srcpeer,
529 dest,
532 dest,
530 pull=False,
533 pull=False,
531 rev=None,
534 rev=None,
532 update=True,
535 update=True,
533 stream=False,
536 stream=False,
534 ):
537 ):
535 """Perform a clone using a shared repo.
538 """Perform a clone using a shared repo.
536
539
537 The store for the repository will be located at <sharepath>/.hg. The
540 The store for the repository will be located at <sharepath>/.hg. The
538 specified revisions will be cloned or pulled from "source". A shared repo
541 specified revisions will be cloned or pulled from "source". A shared repo
539 will be created at "dest" and a working copy will be created if "update" is
542 will be created at "dest" and a working copy will be created if "update" is
540 True.
543 True.
541 """
544 """
542 revs = None
545 revs = None
543 if rev:
546 if rev:
544 if not srcpeer.capable(b'lookup'):
547 if not srcpeer.capable(b'lookup'):
545 raise error.Abort(
548 raise error.Abort(
546 _(
549 _(
547 b"src repository does not support "
550 b"src repository does not support "
548 b"revision lookup and so doesn't "
551 b"revision lookup and so doesn't "
549 b"support clone by revision"
552 b"support clone by revision"
550 )
553 )
551 )
554 )
552
555
553 # TODO this is batchable.
556 # TODO this is batchable.
554 remoterevs = []
557 remoterevs = []
555 for r in rev:
558 for r in rev:
556 with srcpeer.commandexecutor() as e:
559 with srcpeer.commandexecutor() as e:
557 remoterevs.append(
560 remoterevs.append(
558 e.callcommand(
561 e.callcommand(
559 b'lookup',
562 b'lookup',
560 {
563 {
561 b'key': r,
564 b'key': r,
562 },
565 },
563 ).result()
566 ).result()
564 )
567 )
565 revs = remoterevs
568 revs = remoterevs
566
569
567 # Obtain a lock before checking for or cloning the pooled repo otherwise
570 # Obtain a lock before checking for or cloning the pooled repo otherwise
568 # 2 clients may race creating or populating it.
571 # 2 clients may race creating or populating it.
569 pooldir = os.path.dirname(sharepath)
572 pooldir = os.path.dirname(sharepath)
570 # lock class requires the directory to exist.
573 # lock class requires the directory to exist.
571 try:
574 try:
572 util.makedir(pooldir, False)
575 util.makedir(pooldir, False)
573 except FileExistsError:
576 except FileExistsError:
574 pass
577 pass
575
578
576 poolvfs = vfsmod.vfs(pooldir)
579 poolvfs = vfsmod.vfs(pooldir)
577 basename = os.path.basename(sharepath)
580 basename = os.path.basename(sharepath)
578
581
579 with lock.lock(poolvfs, b'%s.lock' % basename):
582 with lock.lock(poolvfs, b'%s.lock' % basename):
580 if os.path.exists(sharepath):
583 if os.path.exists(sharepath):
581 ui.status(
584 ui.status(
582 _(b'(sharing from existing pooled repository %s)\n') % basename
585 _(b'(sharing from existing pooled repository %s)\n') % basename
583 )
586 )
584 else:
587 else:
585 ui.status(
588 ui.status(
586 _(b'(sharing from new pooled repository %s)\n') % basename
589 _(b'(sharing from new pooled repository %s)\n') % basename
587 )
590 )
588 # Always use pull mode because hardlinks in share mode don't work
591 # Always use pull mode because hardlinks in share mode don't work
589 # well. Never update because working copies aren't necessary in
592 # well. Never update because working copies aren't necessary in
590 # share mode.
593 # share mode.
591 clone(
594 clone(
592 ui,
595 ui,
593 peeropts,
596 peeropts,
594 source,
597 source,
595 dest=sharepath,
598 dest=sharepath,
596 pull=True,
599 pull=True,
597 revs=rev,
600 revs=rev,
598 update=False,
601 update=False,
599 stream=stream,
602 stream=stream,
600 )
603 )
601
604
602 # Resolve the value to put in [paths] section for the source.
605 # Resolve the value to put in [paths] section for the source.
603 if islocal(source):
606 if islocal(source):
604 defaultpath = util.abspath(urlutil.urllocalpath(source))
607 defaultpath = util.abspath(urlutil.urllocalpath(source))
605 else:
608 else:
606 defaultpath = source
609 defaultpath = source
607
610
608 sharerepo = repository(ui, path=sharepath)
611 sharerepo = repository(ui, path=sharepath)
609 destrepo = share(
612 destrepo = share(
610 ui,
613 ui,
611 sharerepo,
614 sharerepo,
612 dest=dest,
615 dest=dest,
613 update=False,
616 update=False,
614 bookmarks=False,
617 bookmarks=False,
615 defaultpath=defaultpath,
618 defaultpath=defaultpath,
616 )
619 )
617
620
618 # We need to perform a pull against the dest repo to fetch bookmarks
621 # We need to perform a pull against the dest repo to fetch bookmarks
619 # and other non-store data that isn't shared by default. In the case of
622 # and other non-store data that isn't shared by default. In the case of
620 # non-existing shared repo, this means we pull from the remote twice. This
623 # non-existing shared repo, this means we pull from the remote twice. This
621 # is a bit weird. But at the time it was implemented, there wasn't an easy
624 # is a bit weird. But at the time it was implemented, there wasn't an easy
622 # way to pull just non-changegroup data.
625 # way to pull just non-changegroup data.
623 exchange.pull(destrepo, srcpeer, heads=revs)
626 exchange.pull(destrepo, srcpeer, heads=revs)
624
627
625 _postshareupdate(destrepo, update)
628 _postshareupdate(destrepo, update)
626
629
627 return srcpeer, peer(ui, peeropts, dest)
630 return srcpeer, peer(ui, peeropts, dest)
628
631
629
632
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)

    ``fname`` is the cache file name, resolved against ``srcrepo``'s cache
    vfs on the source side and joined to ``dstcachedir`` on the destination
    side.  The destination directory is created on demand.  Missing source
    caches are silently skipped: the destination can always recompute them.
    """
    srcfname = srcrepo.cachevfs.join(fname)
    dstfname = os.path.join(dstcachedir, fname)
    if os.path.exists(srcfname):
        # Create the destination directory race-safely: another process may
        # create it between a bare exists() check and mkdir().  This mirrors
        # the makedir/FileExistsError pattern used elsewhere in this module.
        try:
            util.makedir(dstcachedir, False)
        except FileExistsError:
            pass
        util.copyfile(srcfname, dstfname)
639
642
640
643
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src_path = urlutil.get_clone_path_obj(ui, source)
        if src_path is None:
            srcpeer = peer(ui, peeropts, b'')
            origsource = source = b''
            branches = (None, branch or [])
        else:
            srcpeer = peer(ui, peeropts, src_path)
            origsource = src_path.rawloc
            branches = (src_path.branch, branch or [])
            source = src_path.loc
    else:
        if util.safehasattr(source, 'peer'):
            srcpeer = source.peer()  # in case we were called with a localrepo
        else:
            srcpeer = source
        branches = (None, branch or [])
        # XXX path: simply use the peer `path` object when this become available
        origsource = source = srcpeer.url()
    srclock = destlock = destwlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest_path = urlutil.get_clone_path_obj(ui, dest)
            if dest_path is not None:
                dest = dest_path.rawloc
            else:
                dest = b''

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)

            destwlock = destrepo.wlock()
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)

            # make the peer aware that is it already locked
            #
            # important:
            #
            # We still need to release that lock at the end of the function
            destpeer.local()._lockref = weakref.ref(destlock)
            destpeer.local()._wlockref = weakref.ref(destwlock)
            # dirstate also needs to be copied because `_wlockref` has a reference
            # to it: this dirstate is saved to disk when the wlock is released
            destpeer.local().dirstate = destrepo.dirstate

            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except FileExistsError:
                cleandir = None
                raise error.Abort(_(b"destination '%s' already exists") % dest)

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        heads=revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # Translate the template, then interpolate: the
                            # previous form translated the already-formatted
                            # message, which can never match the i18n catalog.
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                # fix copy-paste error: this used to release destlock a second
                # time, leaving the wlock held until the finally block
                release(destwlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1119
1122
1120
1123
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of merge/update *stats* on *repo*'s ui.

    When *quietempty* is true and *stats* reports no changed files at
    all, nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    template = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(template % counts)
1136
1139
1137
1140
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated entry point: warn callers, then forward to the merge module.
    msg = b'prefer merge.update() or merge.clean_update() over hg.updaterepo()'
    repo.ui.deprecwarn(msg, b'5.7')
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1156
1159
1157
1160
def update(repo, node, quietempty=False, updatecheck=None):
    """Check out ``node`` in the working directory.

    Returns True when unresolved merge conflicts remain.
    """
    ctx = repo[node]
    stats = mergemod.update(ctx, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1165
1168
1166
1169
# naming conflict in clone()
# NOTE(review): clone() appears to shadow the name `update` locally, so it
# calls this alias instead — confirm against clone() earlier in this file.
_update = update
1169
1172
1170
1173
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, clobbering changes.

    Always returns False (a forced update cannot leave conflicts behind).
    """
    stats = mergemod.clean_update(repo[node])
    # a clean (forced) update can never produce unresolved files
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False
1178
1181
1179
1182
# naming conflict in updatetotally()
_clean = clean

# Accepted values for the `updatecheck` argument and the
# `commands.update.check` config option; see updatetotally() for semantics.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1189
1192
1190
1193
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicit caller-supplied value must be one of the known constants
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly a
            # bookmark to move and/or activate)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out now if the working directory is dirty, then run a
                # plain (unchecked) update below
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # the destination name is an existing bookmark: activate it
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # a non-bookmark destination name: leave any active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1269
1272
1270
1273
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch-merge ``ctx`` into the working directory, resolving changes.

    Returns True when unresolved conflicts remain.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return has_conflicts
1292
1295
1293
1296
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the pre-merge parent."""
    ms = mergestatemod.mergestate.read(repo)
    # with conflicts a mergestate was stored and names the local side;
    # without conflicts there is no mergestate, so fall back to '.'
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1307
1310
1308
1311
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the result of
    subreporecurse().
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            peer_path = url = bytes(subpath)
        else:
            # relative subrepo path: join it onto the parent's location,
            # using OS path rules only for local repositories
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # getremotechanges may replace `other` and hands back a cleanup
        # callable that supersedes the plain close() bound above
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1375
1378
1376
1379
def incoming(ui, repo, source, opts, subpath=None):
    """Display changesets incoming from ``source``.

    Returns 0 when incoming changes were found (see _incoming).
    """

    def subreporecurse():
        # with --subrepos, recurse into each subrepo; the overall exit code
        # is the minimum of the children's results
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            # --limit caps the number of changesets actually shown
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            # --no-merges: skip changesets that have two parents
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1406
1409
1407
1410
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the nodes missing from the push destinations ``dests``.

    Returns ``(outgoing_revs, others)``: the union of missing nodes across
    all destinations sorted by local revision number, and the list of
    still-open peer objects — the caller is responsible for closing them.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                dest = bytes(subpath)
            else:
                # relative subrepo path: join onto the parent destination,
                # using OS path rules only for local repositories
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # the successfully-queried peer is kept open for the caller
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1450
1453
1451
1454
def _outgoing_recurse(ui, repo, dests, opts):
    """Run ``outgoing`` in every subrepo (with --subrepos); return the
    minimum of the resulting exit codes, or 1 when not recursing."""
    status = 1
    if not opts.get(b'subrepos'):
        return status
    wctx = repo[None]
    for name in sorted(wctx.substate):
        status = min(status, wctx.sub(name).outgoing(ui, dests, opts))
    return status
1460
1463
1461
1464
def _outgoing_filter(repo, revs, opts):
    """Apply --limit / --no-merges / --newest-first to ``revs`` for outgoing.

    This is a generator; note that with --newest-first it reverses ``revs``
    in place when iteration starts.
    """
    limit = logcmdutil.getlimit(opts)
    skip_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()

    if limit is None and not skip_merges:
        # fast path: nothing to filter
        yield from revs
        return

    cl = repo.changelog
    shown = 0
    for node in revs:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if skip_merges and len(parents) == 2:
            continue
        shown += 1
        yield node
1483
1486
1484
1487
def outgoing(ui, repo, dests, opts, subpath=None):
    """Display changesets not present in the given push destinations.

    Returns 0 when outgoing changes were found, 1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # run the outgoing hooks for every peer (even with no changes),
        # then recurse into subrepos
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # _outgoing leaves its peers open; close them here
        for oth in others:
            oth.close()
1516
1519
1517
1520
def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # a failing subrepo verify makes the overall result
                        # non-zero, but keep checking the remaining subrepos
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading the substate itself failed; report and move on
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1550
1553
1551
1554
def remoteui(src, opts):
    """Build a ui suitable for remote interaction from a ui or repo + opts."""
    if util.safehasattr(src, b'baseui'):
        # `src` looks like a repository: start from its base ui so
        # repo-specific configuration is dropped, but keep reading the
        # values to copy from the repo's own ui below
        dst = src.baseui.copy()
        src = src.ui
    else:
        # assume `src` is a global ui object; keep all of its options
        dst = src.copy()

    # ssh-specific options, from the command line or the local config
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # selected local settings that must also apply to the remote ui
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1580
1583
1581
1584
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (repo attribute naming the directory, file name inside it);
# see cachedlocalrepo._repostate() for how these are consumed.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1591
1594
1592
1595
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of the files-of-interest state, used to detect changes
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        # reapply the same view (filter) the cached repo was using
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ``(state, maxmtime)`` for the files of interest.

        ``state`` is a tuple of (mtime, size) pairs, one per entry in the
        module-level ``foi`` list; ``maxmtime`` is the newest mtime seen.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # the file may not exist yet; fall back to its directory
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # carry over the cached state so the copy does not refresh needlessly
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now