##// END OF EJS Templates
peer: store the path object used to build a peer from a repo...
marmoute -
r50650:2d11a98d default
parent child Browse files
Show More
@@ -1,1668 +1,1668 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11 import posixpath
11 import posixpath
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import weakref
14 import weakref
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 sha1nodeconstants,
19 sha1nodeconstants,
20 short,
20 short,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 bundlerepo,
26 bundlerepo,
27 cmdutil,
27 cmdutil,
28 destutil,
28 destutil,
29 discovery,
29 discovery,
30 error,
30 error,
31 exchange,
31 exchange,
32 extensions,
32 extensions,
33 graphmod,
33 graphmod,
34 httppeer,
34 httppeer,
35 localrepo,
35 localrepo,
36 lock,
36 lock,
37 logcmdutil,
37 logcmdutil,
38 logexchange,
38 logexchange,
39 merge as mergemod,
39 merge as mergemod,
40 mergestate as mergestatemod,
40 mergestate as mergestatemod,
41 narrowspec,
41 narrowspec,
42 phases,
42 phases,
43 requirements,
43 requirements,
44 scmutil,
44 scmutil,
45 sshpeer,
45 sshpeer,
46 statichttprepo,
46 statichttprepo,
47 ui as uimod,
47 ui as uimod,
48 unionrepo,
48 unionrepo,
49 url,
49 url,
50 util,
50 util,
51 verify as verifymod,
51 verify as verifymod,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54 from .interfaces import repository as repositorymod
54 from .interfaces import repository as repositorymod
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
# convenience alias; used e.g. by copystore() to release the destination lock
release = lock.release

# shared features
sharedbookmarks = b'bookmarks'
66
66
67
67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names into revisions against a remote.

    ``other`` may be a peer or a local repository (in which case its
    peer is used).  ``branches`` is a ``(hashbranch, branches)`` pair.
    Returns ``(revs, checkout)`` where ``checkout`` is the revision to
    update to afterwards (or None).
    """
    if util.safehasattr(other, 'peer'):
        # a courtesy to callers using a localrepo for other
        remote = other.peer()
    else:
        remote = other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related to resolve: pass revs through untouched
        return (revs or None), (revs[0] if revs else None)
    revs = list(revs) if revs else []

    if not remote.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with remote.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # b'.' means "the branch the local dirstate is on"
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(hex(r) for r in reversed(branchmap[branch]))
        return True

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch and not primary(hashbranch):
        # not a known branch name: treat it as a raw revision
        revs.append(hashbranch)
    return revs, revs[0]
114
114
115
115
116 def _isfile(path):
116 def _isfile(path):
117 try:
117 try:
118 # we use os.stat() directly here instead of os.path.isfile()
118 # we use os.stat() directly here instead of os.path.isfile()
119 # because the latter started returning `False` on invalid path
119 # because the latter started returning `False` on invalid path
120 # exceptions starting in 3.8 and we care about handling
120 # exceptions starting in 3.8 and we care about handling
121 # invalid paths specially here.
121 # invalid paths specially here.
122 st = os.stat(path)
122 st = os.stat(path)
123 except ValueError as e:
123 except ValueError as e:
124 msg = stringutil.forcebytestr(e)
124 msg = stringutil.forcebytestr(e)
125 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
125 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
126 except OSError:
126 except OSError:
127 return False
127 return False
128 else:
128 else:
129 return stat.S_ISREG(st.st_mode)
129 return stat.S_ISREG(st.st_mode)
130
130
131
131
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a path naming a regular file is a bundle, which is not "local"
        expanded = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(expanded)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        # regular file -> bundle repository; anything else -> local repository
        expanded = util.expandpath(urlutil.urllocalpath(path))
        module = bundlerepo if _isfile(expanded) else localrepo
        return module.instance(ui, expanded, *args, **kwargs)
148
148
149
149
# URL schemes handled by repository (non-peer) classes; b'file' dispatches
# through LocalFactory, which picks localrepo or bundlerepo.
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}
155
155
# URL schemes that are always remote and handled by peer classes
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
162
162
163
163
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # passing a repo/peer object here is deprecated; defer to it
        repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
        return repo.local()
    u = urlutil.url(repo)
    scheme = u.scheme or b'file'
    if scheme in peer_schemes:
        handler = peer_schemes[scheme]
        handler.make_peer  # make sure we load the module
    elif scheme in repo_schemes:
        handler = repo_schemes[scheme]
        handler.instance  # make sure we load the module
    else:
        handler = LocalFactory
    if util.safehasattr(handler, 'islocal'):
        return handler.islocal(repo)  # pytype: disable=module-attr
    return False
182
182
183
183
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    parsed = urlutil.url(path, parsequery=False, parsefragment=False)
    if not parsed.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(parsed.localpath(), b'rb')
191
191
192
192
# a list of (ui, repo) functions called for wire peer initialization
# (run by _setup_repo_or_peer for objects whose .local() is falsy)
wirepeersetupfuncs = []
195
195
196
196
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run setup hooks on a freshly created repository or peer.

    Order matters: ``presetupfuncs`` run first, then every loaded
    extension's ``reposetup`` hook, and finally -- for non-local (wire)
    objects only -- the registered ``wirepeersetupfuncs``.
    """
    # prefer the object's own ui if it has one (may carry repo-level config)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b' > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
215
215
216
216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        # no scheme means a plain local filesystem path
        scheme = b'file'
    if scheme in repo_schemes:
        factory = repo_schemes[scheme]
    elif scheme in peer_schemes:
        # a peer-only scheme cannot yield a repository object
        raise error.Abort(_(b"repository '%s' is not local") % path)
    else:
        factory = LocalFactory
    repo = factory.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    # hand back the standard "visible" view, hiding obsolete changesets
    return repo.filtered(b'visible')
243
243
244
244
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    ui = getattr(uiorrepo, 'ui', uiorrepo)
    rui = remoteui(uiorrepo, opts)
    if util.safehasattr(path, 'url'):
        # this is already a urlutil.path object
        peer_path = path
    else:
        peer_path = urlutil.path(ui, None, rawloc=path, validate_path=False)
    scheme = peer_path.url.scheme  # pytype: disable=attribute-error
    if scheme in peer_schemes:
        # genuinely remote: build the peer from the scheme's peer module
        cls = peer_schemes[scheme]
        peer = cls.make_peer(
            rui,
            peer_path.loc,
            create,
            intents=intents,
            createopts=createopts,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo_path = peer_path.loc  # pytype: disable=attribute-error
        if not repo_path:
            # fall back to the unresolved location if loc is empty
            repo_path = peer_path.rawloc  # pytype: disable=attribute-error
        repo = repository(
            rui,
            repo_path,
            create,
            intents=intents,
            createopts=createopts,
        )
        # pass the path object along so the peer built from the repo
        # records which path it was created from
        peer = repo.peer(path=peer_path)
    return peer
279
279
280
280
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    url_path = urlutil.url(source).path
    # an empty path (bare host, empty source) has no sensible basename
    return os.path.basename(os.path.normpath(url_path)) if url_path else b''
301
301
302
302
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # store lives in our own .hg: not a share
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        # already resolved and cached on the repo object
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    parent = repo.vfs.split(repo.sharedpath)[0]
    srcurl, _branches = urlutil.parseurl(parent)
    src = repository(repo.ui, srcurl)
    repo.srcrepo = src  # cache for subsequent calls
    return src
320
320
321
321
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a path (bytes) or a repo/peer object; it must be
    local either way.  ``dest`` defaults to a name derived from the
    source.  Returns the newly created shared repository object.
    '''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        # resolve the path, then derive a checkout revision from any
        # branch embedded in it
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # NOTE(review): the destination is re-opened after postshare,
    # presumably so the config written by postshare takes effect
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
374
374
375
375
def _prependsourcehgrc(repo):
    """Copy the share source's config in front of this repo's .hg/hgrc.

    Used on unshare.  This is only done if the share was performed using
    the share-safe method, where the source's config is shared.
    """
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # source has no config: nothing to carry over
        return

    existing = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        # source config first so local settings keep overriding it
        fp.write(b"# Config copied from shared source\n")
        fp.write(srcvfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(existing)
396
396
397
397
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around instead of deleting it outright
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make sure nothing keeps using the now-invalid old repo object
    localrepo.poisonrepository(repo)

    return newrepo
444
444
445
445
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # record where the share came from as the default path
        hgrc = util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        destrepo.vfs.write(b'hgrc', hgrc)
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
462
462
463
463
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # a concrete revision was requested
        checkout = update
    # try the requested revision first, then sensible defaults
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        else:
            break
    _update(repo, uprev)
484
484
485
485
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    # the lock is acquired lazily, the first time a store data file is
    # copied; callers must release it (they get it as the return value)
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # publishing repos don't need phase data copied over
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    # util.copyfiles reports back whether hardlinking worked,
                    # which is reused for subsequent files
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # don't leak the destination lock on failure
        release(destlock)
        raise
524
524
525
525
526 def clonewithshare(
526 def clonewithshare(
527 ui,
527 ui,
528 peeropts,
528 peeropts,
529 sharepath,
529 sharepath,
530 source,
530 source,
531 srcpeer,
531 srcpeer,
532 dest,
532 dest,
533 pull=False,
533 pull=False,
534 rev=None,
534 rev=None,
535 update=True,
535 update=True,
536 stream=False,
536 stream=False,
537 ):
537 ):
538 """Perform a clone using a shared repo.
538 """Perform a clone using a shared repo.
539
539
540 The store for the repository will be located at <sharepath>/.hg. The
540 The store for the repository will be located at <sharepath>/.hg. The
541 specified revisions will be cloned or pulled from "source". A shared repo
541 specified revisions will be cloned or pulled from "source". A shared repo
542 will be created at "dest" and a working copy will be created if "update" is
542 will be created at "dest" and a working copy will be created if "update" is
543 True.
543 True.
544 """
544 """
545 revs = None
545 revs = None
546 if rev:
546 if rev:
547 if not srcpeer.capable(b'lookup'):
547 if not srcpeer.capable(b'lookup'):
548 raise error.Abort(
548 raise error.Abort(
549 _(
549 _(
550 b"src repository does not support "
550 b"src repository does not support "
551 b"revision lookup and so doesn't "
551 b"revision lookup and so doesn't "
552 b"support clone by revision"
552 b"support clone by revision"
553 )
553 )
554 )
554 )
555
555
556 # TODO this is batchable.
556 # TODO this is batchable.
557 remoterevs = []
557 remoterevs = []
558 for r in rev:
558 for r in rev:
559 with srcpeer.commandexecutor() as e:
559 with srcpeer.commandexecutor() as e:
560 remoterevs.append(
560 remoterevs.append(
561 e.callcommand(
561 e.callcommand(
562 b'lookup',
562 b'lookup',
563 {
563 {
564 b'key': r,
564 b'key': r,
565 },
565 },
566 ).result()
566 ).result()
567 )
567 )
568 revs = remoterevs
568 revs = remoterevs
569
569
570 # Obtain a lock before checking for or cloning the pooled repo otherwise
570 # Obtain a lock before checking for or cloning the pooled repo otherwise
571 # 2 clients may race creating or populating it.
571 # 2 clients may race creating or populating it.
572 pooldir = os.path.dirname(sharepath)
572 pooldir = os.path.dirname(sharepath)
573 # lock class requires the directory to exist.
573 # lock class requires the directory to exist.
574 try:
574 try:
575 util.makedir(pooldir, False)
575 util.makedir(pooldir, False)
576 except FileExistsError:
576 except FileExistsError:
577 pass
577 pass
578
578
579 poolvfs = vfsmod.vfs(pooldir)
579 poolvfs = vfsmod.vfs(pooldir)
580 basename = os.path.basename(sharepath)
580 basename = os.path.basename(sharepath)
581
581
582 with lock.lock(poolvfs, b'%s.lock' % basename):
582 with lock.lock(poolvfs, b'%s.lock' % basename):
583 if os.path.exists(sharepath):
583 if os.path.exists(sharepath):
584 ui.status(
584 ui.status(
585 _(b'(sharing from existing pooled repository %s)\n') % basename
585 _(b'(sharing from existing pooled repository %s)\n') % basename
586 )
586 )
587 else:
587 else:
588 ui.status(
588 ui.status(
589 _(b'(sharing from new pooled repository %s)\n') % basename
589 _(b'(sharing from new pooled repository %s)\n') % basename
590 )
590 )
591 # Always use pull mode because hardlinks in share mode don't work
591 # Always use pull mode because hardlinks in share mode don't work
592 # well. Never update because working copies aren't necessary in
592 # well. Never update because working copies aren't necessary in
593 # share mode.
593 # share mode.
594 clone(
594 clone(
595 ui,
595 ui,
596 peeropts,
596 peeropts,
597 source,
597 source,
598 dest=sharepath,
598 dest=sharepath,
599 pull=True,
599 pull=True,
600 revs=rev,
600 revs=rev,
601 update=False,
601 update=False,
602 stream=stream,
602 stream=stream,
603 )
603 )
604
604
605 # Resolve the value to put in [paths] section for the source.
605 # Resolve the value to put in [paths] section for the source.
606 if islocal(source):
606 if islocal(source):
607 defaultpath = util.abspath(urlutil.urllocalpath(source))
607 defaultpath = util.abspath(urlutil.urllocalpath(source))
608 else:
608 else:
609 defaultpath = source
609 defaultpath = source
610
610
611 sharerepo = repository(ui, path=sharepath)
611 sharerepo = repository(ui, path=sharepath)
612 destrepo = share(
612 destrepo = share(
613 ui,
613 ui,
614 sharerepo,
614 sharerepo,
615 dest=dest,
615 dest=dest,
616 update=False,
616 update=False,
617 bookmarks=False,
617 bookmarks=False,
618 defaultpath=defaultpath,
618 defaultpath=defaultpath,
619 )
619 )
620
620
621 # We need to perform a pull against the dest repo to fetch bookmarks
621 # We need to perform a pull against the dest repo to fetch bookmarks
622 # and other non-store data that isn't shared by default. In the case of
622 # and other non-store data that isn't shared by default. In the case of
623 # non-existing shared repo, this means we pull from the remote twice. This
623 # non-existing shared repo, this means we pull from the remote twice. This
624 # is a bit weird. But at the time it was implemented, there wasn't an easy
624 # is a bit weird. But at the time it was implemented, there wasn't an easy
625 # way to pull just non-changegroup data.
625 # way to pull just non-changegroup data.
626 exchange.pull(destrepo, srcpeer, heads=revs)
626 exchange.pull(destrepo, srcpeer, heads=revs)
627
627
628 _postshareupdate(destrepo, update)
628 _postshareupdate(destrepo, update)
629
629
630 return srcpeer, peer(ui, peeropts, dest)
630 return srcpeer, peer(ui, peeropts, dest)
631
631
632
632
633 # Recomputing caches is often slow on big repos, so copy them.
633 # Recomputing caches is often slow on big repos, so copy them.
634 def _copycache(srcrepo, dstcachedir, fname):
634 def _copycache(srcrepo, dstcachedir, fname):
635 """copy a cache from srcrepo to destcachedir (if it exists)"""
635 """copy a cache from srcrepo to destcachedir (if it exists)"""
636 srcfname = srcrepo.cachevfs.join(fname)
636 srcfname = srcrepo.cachevfs.join(fname)
637 dstfname = os.path.join(dstcachedir, fname)
637 dstfname = os.path.join(dstcachedir, fname)
638 if os.path.exists(srcfname):
638 if os.path.exists(srcfname):
639 if not os.path.exists(dstcachedir):
639 if not os.path.exists(dstcachedir):
640 os.mkdir(dstcachedir)
640 os.mkdir(dstcachedir)
641 util.copyfile(srcfname, dstfname)
641 util.copyfile(srcfname, dstfname)
642
642
643
643
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        src_path = urlutil.get_clone_path_obj(ui, source)
        if src_path is None:
            srcpeer = peer(ui, peeropts, b'')
            origsource = source = b''
            branches = (None, branch or [])
        else:
            srcpeer = peer(ui, peeropts, src_path)
            origsource = src_path.rawloc
            branches = (src_path.branch, branch or [])
            source = src_path.loc
    else:
        if util.safehasattr(source, 'peer'):
            srcpeer = source.peer()  # in case we were called with a localrepo
        else:
            srcpeer = source
        branches = (None, branch or [])
        # XXX path: simply use the peer `path` object when this become available
        origsource = source = srcpeer.url()
    srclock = destlock = destwlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest_path = urlutil.get_clone_path_obj(ui, dest)
            if dest_path is not None:
                dest = dest_path.rawloc
            else:
                dest = b''

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the extension, so
            # we can't just announce that lfs will be enabled. This check avoids
            # saying that lfs will be enabled, and then saying it's an unknown
            # feature. The lfs creation option is set in either case so that a
            # requirement is added. If the extension is explicitly disabled but the
            # requirement is set, the clone aborts early, before transferring any
            # data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != sha1nodeconstants.nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = util.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            if os.path.exists(dest):
                # only clean up directories we create ourselves
                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
                cleandir = hgdir
            else:
                cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')

            destrootpath = urlutil.urllocalpath(dest)
            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
            localrepo.createrepository(
                ui,
                destrootpath,
                requirements=dest_reqs,
            )
            destrepo = localrepo.makelocalrepository(ui, destrootpath)

            destwlock = destrepo.wlock()
            destlock = destrepo.lock()
            from . import streamclone  # avoid cycle

            streamclone.local_copy(srcrepo, destrepo)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)

            # make the peer aware that is it already locked
            #
            # important:
            #
            # We still need to release that lock at the end of the function
            destpeer.local()._lockref = weakref.ref(destlock)
            destpeer.local()._wlockref = weakref.ref(destwlock)
            # dirstate also needs to be copied because `_wlockref` has a reference
            # to it: this dirstate is saved to disk when the wlock is released
            destpeer.local().dirstate = destrepo.dirstate

            srcrepo.hook(
                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except FileExistsError:
                cleandir = None
                raise error.Abort(_(b"destination '%s' already exists") % dest)

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        heads=revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # format AFTER translation so the untranslated
                            # template is what gettext looks up (this was
                            # previously formatted before the _() call)
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            if destwlock is not None:
                # release the *working copy* lock here (the previous code
                # released destlock a second time, leaving destwlock held
                # until the finally clause)
                release(destwlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
    finally:
        release(srclock, destlock, destwlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1122
1122
1123
1123
1124 def _showstats(repo, stats, quietempty=False):
1124 def _showstats(repo, stats, quietempty=False):
1125 if quietempty and stats.isempty():
1125 if quietempty and stats.isempty():
1126 return
1126 return
1127 repo.ui.status(
1127 repo.ui.status(
1128 _(
1128 _(
1129 b"%d files updated, %d files merged, "
1129 b"%d files updated, %d files merged, "
1130 b"%d files removed, %d files unresolved\n"
1130 b"%d files removed, %d files unresolved\n"
1131 )
1131 )
1132 % (
1132 % (
1133 stats.updatedcount,
1133 stats.updatedcount,
1134 stats.mergedcount,
1134 stats.mergedcount,
1135 stats.removedcount,
1135 stats.removedcount,
1136 stats.unresolvedcount,
1136 stats.unresolvedcount,
1137 )
1137 )
1138 )
1138 )
1139
1139
1140
1140
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to ``node`` (deprecated entry point).

    Changes are clobbered when ``overwrite`` is set, merged otherwise.

    Returns stats (see pydoc mercurial.merge.applyupdates).
    """
    # Deprecated since 5.7: callers should use merge.update() or
    # merge.clean_update() directly.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    labels = [b'working copy', b'destination']
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=labels,
        updatecheck=updatecheck,
    )
1159
1159
1160
1160
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node``.

    Returns True when unresolved file merges remain, False otherwise.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1168
1168
1169
1169
# naming conflict in clone()
# module-level alias so functions whose local names shadow `update`
# (e.g. clone()) can still reach this function
_update = update
1172
1172
1173
1173
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, clobbering changes.

    Always returns False: a clean (forced) update can never leave
    unresolved conflicts behind.
    """
    stats = mergemod.clean_update(repo[node])
    # a forced update must never produce merge conflicts
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False
1181
1181
1182
1182
# naming conflict in updatetotally()
# module-level alias so updatetotally() can take a `clean` boolean argument
# while still calling this function
_clean = clean
1185
1185
# the full set of values accepted for the `updatecheck` argument of
# updatetotally() and for the `commands.update.check` configuration knob
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1192
1192
1193
1193
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # defensive check: an explicit caller-supplied value must be valid
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # a bookmark to move and/or a bookmark name to activate)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out up front on a dirty working directory, then fall
                # through to a plain (unchecked) update
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            # the update succeeded; try to advance the active bookmark
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # the requested name is a bookmark: make it the active one
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # a non-bookmark name was requested: deactivate any active
            # bookmark so it stops following the working directory
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            # we chose the destination ourselves; mention other candidates
            destutil.statusotherdests(ui, repo)

    return ret
1272
1272
1273
1273
def merge(ctx, force=False, remind=True, labels=None):
    """Branch merge ``ctx`` into the working directory, resolving changes.

    Returns True when any file merges remain unresolved.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    hasconflicts = stats.unresolvedcount > 0
    if hasconflicts:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return hasconflicts
1295
1295
1296
1296
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the pre-merge parent."""
    ms = mergestatemod.mergestate.read(repo)
    # when the merge left conflicts, the mergestate records the local side;
    # otherwise no mergestate was stored and '.' already is the target
    node = ms.localctx.hex() if ms.active() else repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1310
1310
1311
1311
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed; otherwise
    returns the result of ``subreporecurse()``.
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            # an absolute subrepo source replaces the parent URL entirely
            peer_path = url = bytes(subpath)
        else:
            # relative subrepo source: graft it onto the parent repo's URL
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    # `cleanupfn` may be replaced below by the bundle-aware cleanup returned
    # from getremotechanges(); start with plain peer close
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # this may replace `other` with a (possibly bundle-backed) repo and
        # hands back the matching cleanup callable
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1378
1378
1379
1379
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets in ``source`` not present in the local repository.

    Returns an exit code: 0 when incoming changes were found (here or in a
    subrepository with --subrepos), 1 otherwise.
    """

    def subreporecurse():
        # recurse into subrepositories when --subrepos was requested;
        # note the loop variable shadows the outer `subpath` parameter,
        # which is only used by the parent-level _incoming() call below
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # apply --limit/--newest-first/--no-merges while showing csets
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                p for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1409
1409
1410
1410
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the revisions missing from the given push destinations.

    Returns a pair ``(outgoing_revs, others)``: ``outgoing_revs`` is the
    list of nodes missing from at least one destination, ordered by local
    revision number; ``others`` is the list of peers that were opened.
    The caller is responsible for closing every peer in ``others``.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            # Build the adjusted destination in a loop-local variable.
            # The previous code rebound `subpath` itself, so with more
            # than one destination the second iteration passed an already
            # wrapped urlutil.url object back into urlutil.url().
            sub_url = urlutil.url(subpath)
            if sub_url.isabs():
                # absolute subrepo source replaces the destination entirely
                dest = bytes(sub_url)
            else:
                # relative subrepo source: graft it onto the destination URL
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, bytes(sub_url)))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            # close the peer we just opened before propagating; peers
            # already stored in `others` are the caller's responsibility
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1453
1453
1454
1454
def _outgoing_recurse(ui, repo, dests, opts):
    """Run `outgoing` on each subrepository when --subrepos was requested.

    Returns 0 if any subrepository reported outgoing changes, 1 otherwise.
    """
    ret = 1
    if not opts.get(b'subrepos'):
        return ret
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        ret = min(ret, wctx.sub(subpath).outgoing(ui, dests, opts))
    return ret
1463
1463
1464
1464
def _outgoing_filter(repo, revs, opts):
    """Apply the --limit/--no-merges/--newest-first options for outgoing.

    Yields the selected nodes from ``revs``.  NOTE: when newest_first is
    set, ``revs`` is reversed in place.
    """
    maxcount = logcmdutil.getlimit(opts)
    skipmerges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if maxcount is None and not skipmerges:
        # no filtering requested: pass every node straight through
        yield from revs
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if maxcount is not None and emitted >= maxcount:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if skipmerges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1486
1486
1487
1487
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not found in the given destination(s).

    Returns an exit code: 0 when outgoing changes were found (here or in a
    subrepository with --subrepos), 1 otherwise.
    """
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for n in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[n])
                displayer.close()
        # run the outgoing hooks against every peer, even when no changes
        # were found
        for oth in others:
            cmdutil.outgoinghooks(ui, repo, oth, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # _outgoing() handed us the open peers; close them all
        for oth in others:
            oth.close()
1519
1519
1520
1520
def verify(repo, level=None):
    """verify the consistency of a repository

    Runs the core verify and then, if any revision touches .hgsubstate,
    also verifies each reachable subrepository.  Returns the (possibly
    accumulated) result of verifymod.verify().
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # keep any non-zero result sticky across subrepos
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading ctx.substate itself blew up: the .hgsubstate file
                # in this revision cannot be parsed
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1553
1553
1554
1554
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # `src` looks like a repository: start from its base ui so that
        # repo-specific configuration is dropped, but keep reading option
        # values below from the repo-level ui
        dst = src.baseui.copy()
        src = src.ui
    else:
        # assume `src` is a global ui object: keep all global options
        dst = src.copy()

    # ssh-specific options, from the command line or from config
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # bundle-specific options
    mainreporoot = src.config(b'bundle', b'mainreporoot')
    if mainreporoot:
        dst.setconfig(b'bundle', b'mainreporoot', mainreporoot, b'copied')

    # selected local settings that must follow over to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1583
1583
1584
1584
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (name of the repo attribute holding the base directory,
# file name relative to it); consumed by cachedlocalrepo._repostate.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1594
1594
1595
1595
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) pairs for the files of interest, plus
        # the newest of those mtimes
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.
        state, mtime = self._repostate()
        if state == self._state:
            # nothing of interest changed on disk: reuse the cached repo
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return (tuple of (mtime, size) per file of interest, max mtime)."""
        entries = []
        newest = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            target = os.path.join(prefix, fname)
            try:
                st = os.stat(target)
            except OSError:
                # the file may not exist yet; fall back to its directory
                st = os.stat(prefix)
            mtime = st[stat.ST_MTIME]
            entries.append((mtime, st.st_size))
            newest = max(newest, mtime)

        return tuple(entries), newest

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        fresh = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            fresh = fresh.filtered(self._filtername)
        else:
            fresh = fresh.unfiltered()
        dup = cachedlocalrepo(fresh)
        dup._state = self._state
        dup.mtime = self.mtime
        return dup
General Comments 0
You need to be logged in to leave comments. Login now